0001 // SPDX-License-Identifier: ISC
0002 
0003 #include <linux/etherdevice.h>
0004 #include <linux/timekeeping.h>
0005 #include "mt7603.h"
0006 #include "mac.h"
0007 #include "../trace.h"
0008 
0009 #define MT_PSE_PAGE_SIZE    128
0010 
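/* Expand a 4-bit access-category mask into the per-queue bit groups used by
 * the MT_WF_ARB_TX_START_0 / MT_WF_ARB_TX_STOP_0 registers.
 */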
0011 static u32
0012 mt7603_ac_queue_mask0(u32 mask)
0013 {
0014     u32 ret = 0;
0015 
0016     ret |= GENMASK(3, 0) * !!(mask & BIT(0));
0017     ret |= GENMASK(8, 5) * !!(mask & BIT(1));
0018     ret |= GENMASK(13, 10) * !!(mask & BIT(2));
0019     ret |= GENMASK(19, 16) * !!(mask & BIT(3));
0020     return ret;
0021 }
0022 
0023 static void
0024 mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
0025 {
0026     mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
0027 }
0028 
0029 static void
0030 mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
0031 {
0032     mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
0033 }
0034 
0035 void mt7603_mac_reset_counters(struct mt7603_dev *dev)
0036 {
0037     int i;
0038 
0039     for (i = 0; i < 2; i++)
0040         mt76_rr(dev, MT_TX_AGG_CNT(i));
0041 
0042     memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
0043 }
0044 
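/* Program MAC timing: CCK/OFDM PLCP and CCA timeouts (extended by the
 * coverage class), EIFS/RIFS/SIFS/slot time and the CF-End rate. TX/RX are
 * disabled in the arbiter while the registers are updated.
 */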
0045 void mt7603_mac_set_timing(struct mt7603_dev *dev)
0046 {
0047     u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
0048           FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
0049     u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
0050            FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
0051     int offset = 3 * dev->coverage_class;
0052     u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
0053              FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
0054     bool is_5ghz = dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ;
0055     int sifs;
0056     u32 val;
0057 
0058     if (is_5ghz)
0059         sifs = 16;
0060     else
0061         sifs = 10;
0062 
0063     mt76_set(dev, MT_ARB_SCR,
0064          MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
0065     udelay(1);
0066 
0067     mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
0068     mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
0069     mt76_wr(dev, MT_IFS,
0070         FIELD_PREP(MT_IFS_EIFS, 360) |
0071         FIELD_PREP(MT_IFS_RIFS, 2) |
0072         FIELD_PREP(MT_IFS_SIFS, sifs) |
0073         FIELD_PREP(MT_IFS_SLOT, dev->slottime));
0074 
0075     if (dev->slottime < 20 || is_5ghz)
0076         val = MT7603_CFEND_RATE_DEFAULT;
0077     else
0078         val = MT7603_CFEND_RATE_11B;
0079 
0080     mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);
0081 
0082     mt76_clear(dev, MT_ARB_SCR,
0083            MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
0084 }
0085 
0086 static void
0087 mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
0088 {
0089     mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
0090          FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
0091 
0092     mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
0093 }
0094 
0095 static u32
0096 mt7603_wtbl1_addr(int idx)
0097 {
0098     return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
0099 }
0100 
0101 static u32
0102 mt7603_wtbl2_addr(int idx)
0103 {
0104     /* Mapped to WTBL2 */
0105     return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
0106 }
0107 
0108 static u32
0109 mt7603_wtbl3_addr(int idx)
0110 {
0111     u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);
0112 
0113     return base + idx * MT_WTBL3_SIZE;
0114 }
0115 
0116 static u32
0117 mt7603_wtbl4_addr(int idx)
0118 {
0119     u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);
0120 
0121     return base + idx * MT_WTBL4_SIZE;
0122 }
0123 
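/* Initialize the WTBL1 entry for a new station: MAC address, own-MAC (MUAR)
 * index and admission control, then zero the matching WTBL2/3/4 entries and
 * clear the admission counters.
 */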
0124 void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
0125               const u8 *mac_addr)
0126 {
0127     const void *_mac = mac_addr;
0128     u32 addr = mt7603_wtbl1_addr(idx);
0129     u32 w0 = 0, w1 = 0;
0130     int i;
0131 
0132     if (_mac) {
0133         w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
0134                 get_unaligned_le16(_mac + 4));
0135         w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
0136                 get_unaligned_le32(_mac));
0137     }
0138 
0139     if (vif < 0)
0140         vif = 0;
0141     else
0142         w0 |= MT_WTBL1_W0_RX_CHECK_A1;
0143     w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);
0144 
0145     mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
0146 
0147     mt76_set(dev, addr + 0 * 4, w0);
0148     mt76_set(dev, addr + 1 * 4, w1);
0149     mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);
0150 
0151     mt76_stop_tx_ac(dev, GENMASK(3, 0));
0152     addr = mt7603_wtbl2_addr(idx);
0153     for (i = 0; i < MT_WTBL2_SIZE; i += 4)
0154         mt76_wr(dev, addr + i, 0);
0155     mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
0156     mt76_start_tx_ac(dev, GENMASK(3, 0));
0157 
0158     addr = mt7603_wtbl3_addr(idx);
0159     for (i = 0; i < MT_WTBL3_SIZE; i += 4)
0160         mt76_wr(dev, addr + i, 0);
0161 
0162     addr = mt7603_wtbl4_addr(idx);
0163     for (i = 0; i < MT_WTBL4_SIZE; i += 4)
0164         mt76_wr(dev, addr + i, 0);
0165 
0166     mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
0167 }
0168 
0169 static void
0170 mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
0171 {
0172     u32 addr = mt7603_wtbl1_addr(idx);
0173     u32 val = mt76_rr(dev, addr + 3 * 4);
0174 
0175     val &= ~MT_WTBL1_W3_SKIP_TX;
0176     val |= enabled * MT_WTBL1_W3_SKIP_TX;
0177 
0178     mt76_wr(dev, addr + 3 * 4, val);
0179 }
0180 
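/* Flush frames queued in hardware for a WCID. With abort set they are dropped
 * to the PSE free queue, otherwise they are redirected to the HIF MCU queue.
 * TX for the entry is skipped while the flush is in progress.
 */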
0181 void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
0182 {
0183     int i, port, queue;
0184 
0185     if (abort) {
0186         port = 3; /* PSE */
0187         queue = 8; /* free queue */
0188     } else {
0189         port = 0; /* HIF */
0190         queue = 1; /* MCU queue */
0191     }
0192 
0193     mt7603_wtbl_set_skip_tx(dev, idx, true);
0194 
0195     mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
0196             FIELD_PREP(MT_TX_ABORT_WCID, idx));
0197 
0198     for (i = 0; i < 4; i++) {
0199         mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
0200             FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
0201             FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
0202             FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
0203             FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));
0204 
0205         mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 15000);
0206     }
0207 
0208     WARN_ON_ONCE(mt76_rr(dev, MT_DMA_FQCR0) & MT_DMA_FQCR0_BUSY);
0209 
0210     mt76_wr(dev, MT_TX_ABORT, 0);
0211 
0212     mt7603_wtbl_set_skip_tx(dev, idx, false);
0213 }
0214 
0215 void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
0216               bool enabled)
0217 {
0218     u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);
0219 
0220     if (sta->smps == enabled)
0221         return;
0222 
0223     mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
0224     sta->smps = enabled;
0225 }
0226 
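/* Update the hardware power-save state of a station: program the PSE redirect
 * for its traffic, filter pending frames when entering power save, and toggle
 * the WTBL power-save (PSM) bit under ps_lock.
 */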
0227 void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
0228             bool enabled)
0229 {
0230     int idx = sta->wcid.idx;
0231     u32 addr;
0232 
0233     spin_lock_bh(&dev->ps_lock);
0234 
0235     if (sta->ps == enabled)
0236         goto out;
0237 
0238     mt76_wr(dev, MT_PSE_RTA,
0239         FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
0240         FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
0241         FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
0242         FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
0243         MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);
0244 
0245     mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);
0246 
0247     if (enabled)
0248         mt7603_filter_tx(dev, idx, false);
0249 
0250     addr = mt7603_wtbl1_addr(idx);
0251     mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
0252     mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
0253          enabled * MT_WTBL1_W3_POWER_SAVE);
0254     mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
0255     sta->ps = enabled;
0256 
0257 out:
0258     spin_unlock_bh(&dev->ps_lock);
0259 }
0260 
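/* Reset a WTBL1 entry to its default state, re-link it to its WTBL2/3/4
 * frame/entry slots, clear the BA information and reset the RX/TX/admission
 * counters.
 */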
0261 void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
0262 {
0263     int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
0264     int wtbl2_frame = idx / wtbl2_frame_size;
0265     int wtbl2_entry = idx % wtbl2_frame_size;
0266 
0267     int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
0268     int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
0269     int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
0270     int wtbl3_entry = (idx % wtbl3_frame_size) * 2;
0271 
0272     int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
0273     int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
0274     int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
0275     int wtbl4_entry = idx % wtbl4_frame_size;
0276 
0277     u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
0278     int i;
0279 
0280     mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
0281 
0282     mt76_wr(dev, addr + 0 * 4,
0283         MT_WTBL1_W0_RX_CHECK_A1 |
0284         MT_WTBL1_W0_RX_CHECK_A2 |
0285         MT_WTBL1_W0_RX_VALID);
0286     mt76_wr(dev, addr + 1 * 4, 0);
0287     mt76_wr(dev, addr + 2 * 4, 0);
0288 
0289     mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
0290 
0291     mt76_wr(dev, addr + 3 * 4,
0292         FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
0293         FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
0294         FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
0295         MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
0296     mt76_wr(dev, addr + 4 * 4,
0297         FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
0298         FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
0299         FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));
0300 
0301     mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
0302 
0303     addr = mt7603_wtbl2_addr(idx);
0304 
0305     /* Clear BA information */
0306     mt76_wr(dev, addr + (15 * 4), 0);
0307 
0308     mt76_stop_tx_ac(dev, GENMASK(3, 0));
0309     for (i = 2; i <= 4; i++)
0310         mt76_wr(dev, addr + (i * 4), 0);
0311     mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
0312     mt76_start_tx_ac(dev, GENMASK(3, 0));
0313 
0314     mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
0315     mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
0316     mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
0317 }
0318 
0319 void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
0320 {
0321     struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
0322     int idx = msta->wcid.idx;
0323     u8 ampdu_density;
0324     u32 addr;
0325     u32 val;
0326 
0327     addr = mt7603_wtbl1_addr(idx);
0328 
0329     ampdu_density = sta->deflink.ht_cap.ampdu_density;
0330     if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
0331         ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
0332 
0333     val = mt76_rr(dev, addr + 2 * 4);
0334     val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
0335     val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR,
0336               sta->deflink.ht_cap.ampdu_factor) |
0337            FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY,
0338               ampdu_density) |
0339            MT_WTBL1_W2_TXS_BAF_REPORT;
0340 
0341     if (sta->deflink.ht_cap.cap)
0342         val |= MT_WTBL1_W2_HT;
0343     if (sta->deflink.vht_cap.cap)
0344         val |= MT_WTBL1_W2_VHT;
0345 
0346     mt76_wr(dev, addr + 2 * 4, val);
0347 
0348     addr = mt7603_wtbl2_addr(idx);
0349     val = mt76_rr(dev, addr + 9 * 4);
0350     val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
0351          MT_WTBL2_W9_SHORT_GI_80);
0352     if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
0353         val |= MT_WTBL2_W9_SHORT_GI_20;
0354     if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
0355         val |= MT_WTBL2_W9_SHORT_GI_40;
0356     mt76_wr(dev, addr + 9 * 4, val);
0357 }
0358 
0359 void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
0360 {
0361     mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
0362     mt76_wr(dev, MT_BA_CONTROL_1,
0363         (get_unaligned_le16(addr + 4) |
0364          FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
0365          MT_BA_CONTROL_1_RESET));
0366 }
0367 
0368 void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
0369                 int ba_size)
0370 {
0371     u32 addr = mt7603_wtbl2_addr(wcid);
0372     u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
0373                (MT_WTBL2_W15_BA_WIN_SIZE <<
0374             (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
0375     u32 tid_val;
0376     int i;
0377 
0378     if (ba_size < 0) {
0379         /* disable */
0380         mt76_clear(dev, addr + (15 * 4), tid_mask);
0381         return;
0382     }
0383 
0384     for (i = 7; i > 0; i--) {
0385         if (ba_size >= MT_AGG_SIZE_LIMIT(i))
0386             break;
0387     }
0388 
0389     tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
0390           i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);
0391 
0392     mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
0393 }
0394 
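/* Walk the station poll list and read the per-AC TX airtime counters from
 * WTBL4. The deltas (scaled by 32) are reported to mac80211 and accumulated
 * into the channel's TX time (cc_tx); the hardware counters are cleared once
 * they reach BIT(22).
 */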
0395 void mt7603_mac_sta_poll(struct mt7603_dev *dev)
0396 {
0397     static const u8 ac_to_tid[4] = {
0398         [IEEE80211_AC_BE] = 0,
0399         [IEEE80211_AC_BK] = 1,
0400         [IEEE80211_AC_VI] = 4,
0401         [IEEE80211_AC_VO] = 6
0402     };
0403     struct ieee80211_sta *sta;
0404     struct mt7603_sta *msta;
0405     u32 total_airtime = 0;
0406     u32 airtime[4];
0407     u32 addr;
0408     int i;
0409 
0410     rcu_read_lock();
0411 
0412     while (1) {
0413         bool clear = false;
0414 
0415         spin_lock_bh(&dev->sta_poll_lock);
0416         if (list_empty(&dev->sta_poll_list)) {
0417             spin_unlock_bh(&dev->sta_poll_lock);
0418             break;
0419         }
0420 
0421         msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta,
0422                     poll_list);
0423         list_del_init(&msta->poll_list);
0424         spin_unlock_bh(&dev->sta_poll_lock);
0425 
0426         addr = mt7603_wtbl4_addr(msta->wcid.idx);
0427         for (i = 0; i < 4; i++) {
0428             u32 airtime_last = msta->tx_airtime_ac[i];
0429 
0430             msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
0431             airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
0432             airtime[i] *= 32;
0433             total_airtime += airtime[i];
0434 
0435             if (msta->tx_airtime_ac[i] & BIT(22))
0436                 clear = true;
0437         }
0438 
0439         if (clear) {
0440             mt7603_wtbl_update(dev, msta->wcid.idx,
0441                        MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
0442             memset(msta->tx_airtime_ac, 0,
0443                    sizeof(msta->tx_airtime_ac));
0444         }
0445 
0446         if (!msta->wcid.sta)
0447             continue;
0448 
0449         sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
0450         for (i = 0; i < 4; i++) {
0451             struct mt76_queue *q = dev->mphy.q_tx[i];
0452             u8 qidx = q->hw_idx;
0453             u8 tid = ac_to_tid[i];
0454             u32 txtime = airtime[qidx];
0455 
0456             if (!txtime)
0457                 continue;
0458 
0459             ieee80211_sta_register_airtime(sta, tid, txtime, 0);
0460         }
0461     }
0462 
0463     rcu_read_unlock();
0464 
0465     if (!total_airtime)
0466         return;
0467 
0468     spin_lock_bh(&dev->mt76.cc_lock);
0469     dev->mphy.chan_state->cc_tx += total_airtime;
0470     spin_unlock_bh(&dev->mt76.cc_lock);
0471 }
0472 
0473 static struct mt76_wcid *
0474 mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
0475 {
0476     struct mt7603_sta *sta;
0477     struct mt76_wcid *wcid;
0478 
0479     if (idx >= MT7603_WTBL_SIZE)
0480         return NULL;
0481 
0482     wcid = rcu_dereference(dev->mt76.wcid[idx]);
0483     if (unicast || !wcid)
0484         return wcid;
0485 
0486     if (!wcid->sta)
0487         return NULL;
0488 
0489     sta = container_of(wcid, struct mt7603_sta, wcid);
0490     if (!sta->vif)
0491         return NULL;
0492 
0493     return &sta->vif->sta.wcid;
0494 }
0495 
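/* Parse the RX descriptor and its optional groups into struct mt76_rx_status:
 * band/frequency, error and decryption flags, CCMP IV, timestamp/A-MPDU
 * reference, rate, bandwidth and per-chain signal. Returns -EINVAL for
 * malformed frames.
 */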
0496 int
0497 mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
0498 {
0499     struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
0500     struct ieee80211_supported_band *sband;
0501     struct ieee80211_hdr *hdr;
0502     __le32 *rxd = (__le32 *)skb->data;
0503     u32 rxd0 = le32_to_cpu(rxd[0]);
0504     u32 rxd1 = le32_to_cpu(rxd[1]);
0505     u32 rxd2 = le32_to_cpu(rxd[2]);
0506     bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
0507     bool insert_ccmp_hdr = false;
0508     bool remove_pad;
0509     int idx;
0510     int i;
0511 
0512     memset(status, 0, sizeof(*status));
0513 
0514     i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
0515     sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
0516     i >>= 1;
0517 
0518     idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
0519     status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);
0520 
0521     status->band = sband->band;
0522     if (i < sband->n_channels)
0523         status->freq = sband->channels[i].center_freq;
0524 
0525     if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
0526         status->flag |= RX_FLAG_FAILED_FCS_CRC;
0527 
0528     if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
0529         status->flag |= RX_FLAG_MMIC_ERROR;
0530 
0531     /* ICV error or CCMP/BIP/WPI MIC error */
0532     if (rxd2 & MT_RXD2_NORMAL_ICV_ERR)
0533         status->flag |= RX_FLAG_ONLY_MONITOR;
0534 
0535     if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
0536         !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
0537         status->flag |= RX_FLAG_DECRYPTED;
0538         status->flag |= RX_FLAG_IV_STRIPPED;
0539         status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
0540     }
0541 
0542     remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
0543 
0544     if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
0545         return -EINVAL;
0546 
0547     if (!sband->channels)
0548         return -EINVAL;
0549 
0550     rxd += 4;
0551     if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
0552         rxd += 4;
0553         if ((u8 *)rxd - skb->data >= skb->len)
0554             return -EINVAL;
0555     }
0556     if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
0557         u8 *data = (u8 *)rxd;
0558 
0559         if (status->flag & RX_FLAG_DECRYPTED) {
0560             switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
0561             case MT_CIPHER_AES_CCMP:
0562             case MT_CIPHER_CCMP_CCX:
0563             case MT_CIPHER_CCMP_256:
0564                 insert_ccmp_hdr =
0565                     FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
0566                 fallthrough;
0567             case MT_CIPHER_TKIP:
0568             case MT_CIPHER_TKIP_NO_MIC:
0569             case MT_CIPHER_GCMP:
0570             case MT_CIPHER_GCMP_256:
0571                 status->iv[0] = data[5];
0572                 status->iv[1] = data[4];
0573                 status->iv[2] = data[3];
0574                 status->iv[3] = data[2];
0575                 status->iv[4] = data[1];
0576                 status->iv[5] = data[0];
0577                 break;
0578             default:
0579                 break;
0580             }
0581         }
0582 
0583         rxd += 4;
0584         if ((u8 *)rxd - skb->data >= skb->len)
0585             return -EINVAL;
0586     }
0587     if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
0588         status->timestamp = le32_to_cpu(rxd[0]);
0589         status->flag |= RX_FLAG_MACTIME_START;
0590 
0591         if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
0592                   MT_RXD2_NORMAL_NON_AMPDU))) {
0593             status->flag |= RX_FLAG_AMPDU_DETAILS;
0594 
0595             /* all subframes of an A-MPDU have the same timestamp */
0596             if (dev->rx_ampdu_ts != status->timestamp) {
0597                 if (!++dev->ampdu_ref)
0598                     dev->ampdu_ref++;
0599             }
0600             dev->rx_ampdu_ts = status->timestamp;
0601 
0602             status->ampdu_ref = dev->ampdu_ref;
0603         }
0604 
0605         rxd += 2;
0606         if ((u8 *)rxd - skb->data >= skb->len)
0607             return -EINVAL;
0608     }
0609     if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
0610         u32 rxdg0 = le32_to_cpu(rxd[0]);
0611         u32 rxdg3 = le32_to_cpu(rxd[3]);
0612         bool cck = false;
0613 
0614         i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
0615         switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
0616         case MT_PHY_TYPE_CCK:
0617             cck = true;
0618             fallthrough;
0619         case MT_PHY_TYPE_OFDM:
0620             i = mt76_get_rate(&dev->mt76, sband, i, cck);
0621             break;
0622         case MT_PHY_TYPE_HT_GF:
0623         case MT_PHY_TYPE_HT:
0624             status->encoding = RX_ENC_HT;
0625             if (i > 15)
0626                 return -EINVAL;
0627             break;
0628         default:
0629             return -EINVAL;
0630         }
0631 
0632         if (rxdg0 & MT_RXV1_HT_SHORT_GI)
0633             status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
0634         if (rxdg0 & MT_RXV1_HT_AD_CODE)
0635             status->enc_flags |= RX_ENC_FLAG_LDPC;
0636 
0637         status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
0638                     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
0639 
0640         status->rate_idx = i;
0641 
0642         status->chains = dev->mphy.antenna_mask;
0643         status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
0644                       dev->rssi_offset[0];
0645         status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
0646                       dev->rssi_offset[1];
0647 
0648         if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
0649             status->bw = RATE_INFO_BW_40;
0650 
0651         rxd += 6;
0652         if ((u8 *)rxd - skb->data >= skb->len)
0653             return -EINVAL;
0654     } else {
0655         return -EINVAL;
0656     }
0657 
0658     skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
0659 
0660     if (insert_ccmp_hdr) {
0661         u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
0662 
0663         mt76_insert_ccmp_hdr(skb, key_id);
0664     }
0665 
0666     hdr = (struct ieee80211_hdr *)skb->data;
0667     if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
0668         return 0;
0669 
0670     status->aggr = unicast &&
0671                !ieee80211_is_qos_nullfunc(hdr->frame_control);
0672     status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
0673     status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
0674 
0675     return 0;
0676 }
0677 
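/* Translate an ieee80211_tx_rate into the hardware rate field (TX mode and
 * rate index, plus STBC for single-stream rates); the bandwidth selection is
 * returned through *bw.
 */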
0678 static u16
0679 mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
0680                const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
0681 {
0682     u8 phy, nss, rate_idx;
0683     u16 rateval;
0684 
0685     *bw = 0;
0686     if (rate->flags & IEEE80211_TX_RC_MCS) {
0687         rate_idx = rate->idx;
0688         nss = 1 + (rate->idx >> 3);
0689         phy = MT_PHY_TYPE_HT;
0690         if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
0691             phy = MT_PHY_TYPE_HT_GF;
0692         if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
0693             *bw = 1;
0694     } else {
0695         const struct ieee80211_rate *r;
0696         int band = dev->mphy.chandef.chan->band;
0697         u16 val;
0698 
0699         nss = 1;
0700         r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
0701         if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
0702             val = r->hw_value_short;
0703         else
0704             val = r->hw_value;
0705 
0706         phy = val >> 8;
0707         rate_idx = val & 0xff;
0708     }
0709 
0710     rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
0711            FIELD_PREP(MT_TX_RATE_MODE, phy));
0712 
0713     if (stbc && nss == 1)
0714         rateval |= MT_TX_RATE_STBC;
0715 
0716     return rateval;
0717 }
0718 
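/* Program a new rate set for a station through the WTBL rate-update registers.
 * Two rate sets are kept and toggled via bit 0 of the TSF snapshot, so that TX
 * status reports can be matched against the set that was active at queue time.
 */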
0719 void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
0720                struct ieee80211_tx_rate *probe_rate,
0721                struct ieee80211_tx_rate *rates)
0722 {
0723     struct ieee80211_tx_rate *ref;
0724     int wcid = sta->wcid.idx;
0725     u32 addr = mt7603_wtbl2_addr(wcid);
0726     bool stbc = false;
0727     int n_rates = sta->n_rates;
0728     u8 bw, bw_prev, bw_idx = 0;
0729     u16 val[4];
0730     u16 probe_val;
0731     u32 w9 = mt76_rr(dev, addr + 9 * 4);
0732     bool rateset;
0733     int i, k;
0734 
0735     if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
0736         return;
0737 
0738     for (i = n_rates; i < 4; i++)
0739         rates[i] = rates[n_rates - 1];
0740 
0741     rateset = !(sta->rate_set_tsf & BIT(0));
0742     memcpy(sta->rateset[rateset].rates, rates,
0743            sizeof(sta->rateset[rateset].rates));
0744     if (probe_rate) {
0745         sta->rateset[rateset].probe_rate = *probe_rate;
0746         ref = &sta->rateset[rateset].probe_rate;
0747     } else {
0748         sta->rateset[rateset].probe_rate.idx = -1;
0749         ref = &sta->rateset[rateset].rates[0];
0750     }
0751 
0752     rates = sta->rateset[rateset].rates;
0753     for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
0754         /*
0755          * We don't support switching between short and long GI
0756          * within the rate set. For accurate tx status reporting, we
0757          * need to make sure that flags match.
0758          * For improved performance, avoid duplicate entries by
0759          * decrementing the MCS index if necessary
0760          */
0761         if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
0762             rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
0763 
0764         for (k = 0; k < i; k++) {
0765             if (rates[i].idx != rates[k].idx)
0766                 continue;
0767             if ((rates[i].flags ^ rates[k].flags) &
0768                 IEEE80211_TX_RC_40_MHZ_WIDTH)
0769                 continue;
0770 
0771             if (!rates[i].idx)
0772                 continue;
0773 
0774             rates[i].idx--;
0775         }
0776     }
0777 
0778     w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
0779           MT_WTBL2_W9_SHORT_GI_80;
0780 
0781     val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
0782     bw_prev = bw;
0783 
0784     if (probe_rate) {
0785         probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
0786         if (bw)
0787             bw_idx = 1;
0788         else
0789             bw_prev = 0;
0790     } else {
0791         probe_val = val[0];
0792     }
0793 
0794     w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
0795     w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);
0796 
0797     val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
0798     if (bw_prev) {
0799         bw_idx = 3;
0800         bw_prev = bw;
0801     }
0802 
0803     val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
0804     if (bw_prev) {
0805         bw_idx = 5;
0806         bw_prev = bw;
0807     }
0808 
0809     val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
0810     if (bw_prev)
0811         bw_idx = 7;
0812 
0813     w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
0814                bw_idx ? bw_idx - 1 : 7);
0815 
0816     mt76_wr(dev, MT_WTBL_RIUCR0, w9);
0817 
0818     mt76_wr(dev, MT_WTBL_RIUCR1,
0819         FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
0820         FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
0821         FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
0822 
0823     mt76_wr(dev, MT_WTBL_RIUCR2,
0824         FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
0825         FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
0826         FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
0827         FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
0828 
0829     mt76_wr(dev, MT_WTBL_RIUCR3,
0830         FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
0831         FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
0832         FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
0833 
0834     mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
0835     sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
0836 
0837     mt76_wr(dev, MT_WTBL_UPDATE,
0838         FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
0839         MT_WTBL_UPDATE_RATE_UPDATE |
0840         MT_WTBL_UPDATE_TX_COUNT_CLEAR);
0841 
0842     if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
0843         mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
0844 
0845     sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
0846     sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
0847 }
0848 
0849 static enum mt76_cipher_type
0850 mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
0851 {
0852     memset(key_data, 0, 32);
0853     if (!key)
0854         return MT_CIPHER_NONE;
0855 
0856     if (key->keylen > 32)
0857         return MT_CIPHER_NONE;
0858 
0859     memcpy(key_data, key->key, key->keylen);
0860 
0861     switch (key->cipher) {
0862     case WLAN_CIPHER_SUITE_WEP40:
0863         return MT_CIPHER_WEP40;
0864     case WLAN_CIPHER_SUITE_WEP104:
0865         return MT_CIPHER_WEP104;
0866     case WLAN_CIPHER_SUITE_TKIP:
0867         /* Rx/Tx MIC keys are swapped */
0868         memcpy(key_data + 16, key->key + 24, 8);
0869         memcpy(key_data + 24, key->key + 16, 8);
0870         return MT_CIPHER_TKIP;
0871     case WLAN_CIPHER_SUITE_CCMP:
0872         return MT_CIPHER_AES_CCMP;
0873     default:
0874         return MT_CIPHER_NONE;
0875     }
0876 }
0877 
0878 int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
0879             struct ieee80211_key_conf *key)
0880 {
0881     enum mt76_cipher_type cipher;
0882     u32 addr = mt7603_wtbl3_addr(wcid);
0883     u8 key_data[32];
0884     int key_len = sizeof(key_data);
0885 
0886     cipher = mt7603_mac_get_key_info(key, key_data);
0887     if (cipher == MT_CIPHER_NONE && key)
0888         return -EOPNOTSUPP;
0889 
0890     if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
0891         addr += key->keyidx * 16;
0892         key_len = 16;
0893     }
0894 
0895     mt76_wr_copy(dev, addr, key_data, key_len);
0896 
0897     addr = mt7603_wtbl1_addr(wcid);
0898     mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
0899     if (key)
0900         mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
0901     mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);
0902 
0903     return 0;
0904 }
0905 
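/* Fill the TXWI descriptor for a frame: queue and WLAN index, header info,
 * sequence number, optional fixed-rate override, retry count and the PN for
 * encrypted frames.
 */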
0906 static int
0907 mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
0908               struct sk_buff *skb, enum mt76_txq_id qid,
0909               struct mt76_wcid *wcid, struct ieee80211_sta *sta,
0910               int pid, struct ieee80211_key_conf *key)
0911 {
0912     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
0913     struct ieee80211_tx_rate *rate = &info->control.rates[0];
0914     struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
0915     struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
0916     struct ieee80211_vif *vif = info->control.vif;
0917     struct mt76_queue *q = dev->mphy.q_tx[qid];
0918     struct mt7603_vif *mvif;
0919     int wlan_idx;
0920     int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
0921     int tx_count = 8;
0922     u8 frame_type, frame_subtype;
0923     u16 fc = le16_to_cpu(hdr->frame_control);
0924     u16 seqno = 0;
0925     u8 vif_idx = 0;
0926     u32 val;
0927     u8 bw;
0928 
0929     if (vif) {
0930         mvif = (struct mt7603_vif *)vif->drv_priv;
0931         vif_idx = mvif->idx;
0932         if (vif_idx && qid >= MT_TXQ_BEACON)
0933             vif_idx += 0x10;
0934     }
0935 
0936     if (sta) {
0937         struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
0938 
0939         tx_count = msta->rate_count;
0940     }
0941 
0942     if (wcid)
0943         wlan_idx = wcid->idx;
0944     else
0945         wlan_idx = MT7603_WTBL_RESERVED;
0946 
0947     frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
0948     frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;
0949 
0950     val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
0951           FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
0952     txwi[0] = cpu_to_le32(val);
0953 
0954     val = MT_TXD1_LONG_FORMAT |
0955           FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
0956           FIELD_PREP(MT_TXD1_TID,
0957              skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
0958           FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
0959           FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
0960           FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
0961           FIELD_PREP(MT_TXD1_PROTECTED, !!key);
0962     txwi[1] = cpu_to_le32(val);
0963 
0964     if (info->flags & IEEE80211_TX_CTL_NO_ACK)
0965         txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);
0966 
0967     val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
0968           FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
0969           FIELD_PREP(MT_TXD2_MULTICAST,
0970              is_multicast_ether_addr(hdr->addr1));
0971     txwi[2] = cpu_to_le32(val);
0972 
0973     if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
0974         txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
0975 
0976     txwi[4] = 0;
0977 
0978     val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
0979           FIELD_PREP(MT_TXD5_PID, pid);
0980     txwi[5] = cpu_to_le32(val);
0981 
0982     txwi[6] = 0;
0983 
0984     if (rate->idx >= 0 && rate->count &&
0985         !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
0986         bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
0987         u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);
0988 
0989         txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
0990 
0991         val = MT_TXD6_FIXED_BW |
0992               FIELD_PREP(MT_TXD6_BW, bw) |
0993               FIELD_PREP(MT_TXD6_TX_RATE, rateval);
0994         txwi[6] |= cpu_to_le32(val);
0995 
0996         if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
0997             txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
0998 
0999         if (!(rate->flags & IEEE80211_TX_RC_MCS))
1000             txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
1001 
1002         tx_count = rate->count;
1003     }
1004 
1005     /* use maximum tx count for beacons and buffered multicast */
1006     if (qid >= MT_TXQ_BEACON)
1007         tx_count = 0x1f;
1008 
1009     val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
1010           MT_TXD3_SN_VALID;
1011 
1012     if (ieee80211_is_data_qos(hdr->frame_control))
1013         seqno = le16_to_cpu(hdr->seq_ctrl);
1014     else if (ieee80211_is_back_req(hdr->frame_control))
1015         seqno = le16_to_cpu(bar->start_seq_num);
1016     else
1017         val &= ~MT_TXD3_SN_VALID;
1018 
1019     val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
1020 
1021     txwi[3] = cpu_to_le32(val);
1022 
1023     if (key) {
1024         u64 pn = atomic64_inc_return(&key->tx_pn);
1025 
1026         txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
1027         txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
1028         txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
1029     }
1030 
1031     txwi[7] = 0;
1032 
1033     return 0;
1034 }
1035 
1036 int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
1037               enum mt76_txq_id qid, struct mt76_wcid *wcid,
1038               struct ieee80211_sta *sta,
1039               struct mt76_tx_info *tx_info)
1040 {
1041     struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
1042     struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
1043     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
1044     struct ieee80211_key_conf *key = info->control.hw_key;
1045     int pid;
1046 
1047     if (!wcid)
1048         wcid = &dev->global_sta.wcid;
1049 
1050     if (sta) {
1051         msta = (struct mt7603_sta *)sta->drv_priv;
1052 
1053         if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
1054                     IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
1055             (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
1056             mt7603_wtbl_set_ps(dev, msta, false);
1057 
1058         mt76_tx_check_agg_ssn(sta, tx_info->skb);
1059     }
1060 
1061     pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
1062 
1063     if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
1064         spin_lock_bh(&dev->mt76.lock);
1065         mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
1066                       msta->rates);
1067         msta->rate_probe = true;
1068         spin_unlock_bh(&dev->mt76.lock);
1069     }
1070 
1071     mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
1072                   sta, pid, key);
1073 
1074     return 0;
1075 }
1076 
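/* Decode a TX status report: rebuild the per-rate retry counts from the rate
 * set that was active at transmit time and convert the final hardware rate
 * back into a mac80211 rate index and flags.
 */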
1077 static bool
1078 mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
1079         struct ieee80211_tx_info *info, __le32 *txs_data)
1080 {
1081     struct ieee80211_supported_band *sband;
1082     struct mt7603_rate_set *rs;
1083     int first_idx = 0, last_idx;
1084     u32 rate_set_tsf;
1085     u32 final_rate;
1086     u32 final_rate_flags;
1087     bool rs_idx;
1088     bool ack_timeout;
1089     bool fixed_rate;
1090     bool probe;
1091     bool ampdu;
1092     bool cck = false;
1093     int count;
1094     u32 txs;
1095     int idx;
1096     int i;
1097 
1098     fixed_rate = info->status.rates[0].count;
1099     probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1100 
1101     txs = le32_to_cpu(txs_data[4]);
1102     ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
1103     count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
1104     last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);
1105 
1106     txs = le32_to_cpu(txs_data[0]);
1107     final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
1108     ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
1109 
1110     if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
1111         return false;
1112 
1113     if (txs & MT_TXS0_QUEUE_TIMEOUT)
1114         return false;
1115 
1116     if (!ack_timeout)
1117         info->flags |= IEEE80211_TX_STAT_ACK;
1118 
1119     info->status.ampdu_len = 1;
1120     info->status.ampdu_ack_len = !!(info->flags &
1121                     IEEE80211_TX_STAT_ACK);
1122 
1123     if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
1124         info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
1125 
1126     first_idx = max_t(int, 0, last_idx - (count - 1) / MT7603_RATE_RETRY);
1127 
1128     if (fixed_rate && !probe) {
1129         info->status.rates[0].count = count;
1130         i = 0;
1131         goto out;
1132     }
1133 
1134     rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
1135     rs_idx = !((u32)(le32_get_bits(txs_data[1], MT_TXS1_F0_TIMESTAMP) -
1136              rate_set_tsf) < 1000000);
1137     rs_idx ^= rate_set_tsf & BIT(0);
1138     rs = &sta->rateset[rs_idx];
1139 
1140     if (!first_idx && rs->probe_rate.idx >= 0) {
1141         info->status.rates[0] = rs->probe_rate;
1142 
1143         spin_lock_bh(&dev->mt76.lock);
1144         if (sta->rate_probe) {
1145             mt7603_wtbl_set_rates(dev, sta, NULL,
1146                           sta->rates);
1147             sta->rate_probe = false;
1148         }
1149         spin_unlock_bh(&dev->mt76.lock);
1150     } else {
1151         info->status.rates[0] = rs->rates[first_idx / 2];
1152     }
1153     info->status.rates[0].count = 0;
1154 
1155     for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
1156         struct ieee80211_tx_rate *cur_rate;
1157         int cur_count;
1158 
1159         cur_rate = &rs->rates[idx / 2];
1160         cur_count = min_t(int, MT7603_RATE_RETRY, count);
1161         count -= cur_count;
1162 
1163         if (idx && (cur_rate->idx != info->status.rates[i].idx ||
1164                 cur_rate->flags != info->status.rates[i].flags)) {
1165             i++;
1166             if (i == ARRAY_SIZE(info->status.rates)) {
1167                 i--;
1168                 break;
1169             }
1170 
1171             info->status.rates[i] = *cur_rate;
1172             info->status.rates[i].count = 0;
1173         }
1174 
1175         info->status.rates[i].count += cur_count;
1176     }
1177 
1178 out:
1179     final_rate_flags = info->status.rates[i].flags;
1180 
1181     switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
1182     case MT_PHY_TYPE_CCK:
1183         cck = true;
1184         fallthrough;
1185     case MT_PHY_TYPE_OFDM:
1186         if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
1187             sband = &dev->mphy.sband_5g.sband;
1188         else
1189             sband = &dev->mphy.sband_2g.sband;
1190         final_rate &= GENMASK(5, 0);
1191         final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
1192                        cck);
1193         final_rate_flags = 0;
1194         break;
1195     case MT_PHY_TYPE_HT_GF:
1196     case MT_PHY_TYPE_HT:
1197         final_rate_flags |= IEEE80211_TX_RC_MCS;
1198         final_rate &= GENMASK(5, 0);
1199         if (final_rate > 15)
1200             return false;
1201         break;
1202     default:
1203         return false;
1204     }
1205 
1206     info->status.rates[i].idx = final_rate;
1207     info->status.rates[i].flags = final_rate_flags;
1208 
1209     return true;
1210 }
1211 
1212 static bool
1213 mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
1214                __le32 *txs_data)
1215 {
1216     struct mt76_dev *mdev = &dev->mt76;
1217     struct sk_buff_head list;
1218     struct sk_buff *skb;
1219 
1220     if (pid < MT_PACKET_ID_FIRST)
1221         return false;
1222 
1223     trace_mac_txdone(mdev, sta->wcid.idx, pid);
1224 
1225     mt76_tx_status_lock(mdev, &list);
1226     skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
1227     if (skb) {
1228         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1229 
1230         if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
1231             info->status.rates[0].count = 0;
1232             info->status.rates[0].idx = -1;
1233         }
1234 
1235         mt76_tx_status_skb_done(mdev, skb, &list);
1236     }
1237     mt76_tx_status_unlock(mdev, &list);
1238 
1239     return !!skb;
1240 }
1241 
1242 void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
1243 {
1244     struct ieee80211_tx_info info = {};
1245     struct ieee80211_sta *sta = NULL;
1246     struct mt7603_sta *msta = NULL;
1247     struct mt76_wcid *wcid;
1248     __le32 *txs_data = data;
1249     u8 wcidx;
1250     u8 pid;
1251 
1252     pid = le32_get_bits(txs_data[4], MT_TXS4_PID);
1253     wcidx = le32_get_bits(txs_data[3], MT_TXS3_WCID);
1254 
1255     if (pid == MT_PACKET_ID_NO_ACK)
1256         return;
1257 
1258     if (wcidx >= MT7603_WTBL_SIZE)
1259         return;
1260 
1261     rcu_read_lock();
1262 
1263     wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1264     if (!wcid)
1265         goto out;
1266 
1267     msta = container_of(wcid, struct mt7603_sta, wcid);
1268     sta = wcid_to_sta(wcid);
1269 
1270     if (list_empty(&msta->poll_list)) {
1271         spin_lock_bh(&dev->sta_poll_lock);
1272         list_add_tail(&msta->poll_list, &dev->sta_poll_list);
1273         spin_unlock_bh(&dev->sta_poll_lock);
1274     }
1275 
1276     if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
1277         goto out;
1278 
1279     if (wcidx >= MT7603_WTBL_STA || !sta)
1280         goto out;
1281 
1282     if (mt7603_fill_txs(dev, msta, &info, txs_data))
1283         ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
1284 
1285 out:
1286     rcu_read_unlock();
1287 }
1288 
1289 void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
1290 {
1291     struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
1292     struct sk_buff *skb = e->skb;
1293 
1294     if (!e->txwi) {
1295         dev_kfree_skb_any(skb);
1296         return;
1297     }
1298 
1299     dev->tx_hang_check = 0;
1300     mt76_tx_complete_skb(mdev, e->wcid, skb);
1301 }
1302 
1303 static bool
1304 wait_for_wpdma(struct mt7603_dev *dev)
1305 {
1306     return mt76_poll(dev, MT_WPDMA_GLO_CFG,
1307              MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
1308              MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
1309              0, 1000);
1310 }
1311 
1312 static void mt7603_pse_reset(struct mt7603_dev *dev)
1313 {
1314     /* Clear previous reset result */
1315     if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
1316         mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);
1317 
1318     /* Reset PSE */
1319     mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
1320 
1321     if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
1322                 MT_MCU_DEBUG_RESET_PSE_S,
1323                 MT_MCU_DEBUG_RESET_PSE_S, 500)) {
1324         dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
1325         mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
1326     } else {
1327         dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
1328         mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
1329     }
1330 
1331     if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
1332         dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
1333 }
1334 
1335 void mt7603_mac_dma_start(struct mt7603_dev *dev)
1336 {
1337     mt7603_mac_start(dev);
1338 
1339     wait_for_wpdma(dev);
1340     usleep_range(50, 100);
1341 
1342     mt76_set(dev, MT_WPDMA_GLO_CFG,
1343          (MT_WPDMA_GLO_CFG_TX_DMA_EN |
1344           MT_WPDMA_GLO_CFG_RX_DMA_EN |
1345           FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
1346           MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));
1347 
1348     mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
1349 }
1350 
1351 void mt7603_mac_start(struct mt7603_dev *dev)
1352 {
1353     mt76_clear(dev, MT_ARB_SCR,
1354            MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1355     mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
1356     mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
1357 }
1358 
1359 void mt7603_mac_stop(struct mt7603_dev *dev)
1360 {
1361     mt76_set(dev, MT_ARB_SCR,
1362          MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1363     mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
1364     mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
1365 }
1366 
1367 void mt7603_pse_client_reset(struct mt7603_dev *dev)
1368 {
1369     u32 addr;
1370 
1371     addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
1372                    MT_CLIENT_RESET_TX);
1373 
1374     /* Clear previous reset state */
1375     mt76_clear(dev, addr,
1376            MT_CLIENT_RESET_TX_R_E_1 |
1377            MT_CLIENT_RESET_TX_R_E_2 |
1378            MT_CLIENT_RESET_TX_R_E_1_S |
1379            MT_CLIENT_RESET_TX_R_E_2_S);
1380 
1381     /* Start PSE client TX abort */
1382     mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
1383     mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
1384                MT_CLIENT_RESET_TX_R_E_1_S, 500);
1385 
1386     mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
1387     mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
1388 
1389     /* Wait for PSE client to clear TX FIFO */
1390     mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
1391                MT_CLIENT_RESET_TX_R_E_2_S, 500);
1392 
1393     /* Clear PSE client TX abort state */
1394     mt76_clear(dev, addr,
1395            MT_CLIENT_RESET_TX_R_E_1 |
1396            MT_CLIENT_RESET_TX_R_E_2);
1397 }
1398 
1399 static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
1400 {
1401     if (!is_mt7628(dev))
1402         return;
1403 
1404     mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
1405     mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
1406 }
1407 
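/* Recovery path run when the watchdog detects a hang: stop MAC and DMA, reset
 * the PSE and its TX client if needed, clean up all TX/RX queues, then restart
 * DMA and re-enable interrupts and queues.
 */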
1408 static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
1409 {
1410     int beacon_int = dev->mt76.beacon_int;
1411     u32 mask = dev->mt76.mmio.irqmask;
1412     int i;
1413 
1414     ieee80211_stop_queues(dev->mt76.hw);
1415     set_bit(MT76_RESET, &dev->mphy.state);
1416 
1417     /* lock/unlock all queues to ensure that no tx is pending */
1418     mt76_txq_schedule_all(&dev->mphy);
1419 
1420     mt76_worker_disable(&dev->mt76.tx_worker);
1421     tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
1422     napi_disable(&dev->mt76.napi[0]);
1423     napi_disable(&dev->mt76.napi[1]);
1424     napi_disable(&dev->mt76.tx_napi);
1425 
1426     mutex_lock(&dev->mt76.mutex);
1427 
1428     mt7603_beacon_set_timer(dev, -1, 0);
1429 
1430     if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
1431         dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
1432         dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
1433         dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
1434         mt7603_pse_reset(dev);
1435 
1436     if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
1437         goto skip_dma_reset;
1438 
1439     mt7603_mac_stop(dev);
1440 
1441     mt76_clear(dev, MT_WPDMA_GLO_CFG,
1442            MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
1443            MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
1444     usleep_range(1000, 2000);
1445 
1446     mt7603_irq_disable(dev, mask);
1447 
1448     mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
1449 
1450     mt7603_pse_client_reset(dev);
1451 
1452     mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
1453     for (i = 0; i < __MT_TXQ_MAX; i++)
1454         mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
1455 
1456     mt76_for_each_q_rx(&dev->mt76, i) {
1457         mt76_queue_rx_reset(dev, i);
1458     }
1459 
1460     mt76_tx_status_check(&dev->mt76, true);
1461 
1462     mt7603_dma_sched_reset(dev);
1463 
1464     mt7603_mac_dma_start(dev);
1465 
1466     mt7603_irq_enable(dev, mask);
1467 
1468 skip_dma_reset:
1469     clear_bit(MT76_RESET, &dev->mphy.state);
1470     mutex_unlock(&dev->mt76.mutex);
1471 
1472     mt76_worker_enable(&dev->mt76.tx_worker);
1473 
1474     tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
1475     mt7603_beacon_set_timer(dev, -1, beacon_int);
1476 
1477     local_bh_disable();
1478     napi_enable(&dev->mt76.tx_napi);
1479     napi_schedule(&dev->mt76.tx_napi);
1480 
1481     napi_enable(&dev->mt76.napi[0]);
1482     napi_schedule(&dev->mt76.napi[0]);
1483 
1484     napi_enable(&dev->mt76.napi[1]);
1485     napi_schedule(&dev->mt76.napi[1]);
1486     local_bh_enable();
1487 
1488     ieee80211_wake_queues(dev->mt76.hw);
1489     mt76_txq_schedule_all(&dev->mphy);
1490 }
1491 
1492 static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
1493 {
1494     u32 val;
1495 
1496     mt76_wr(dev, MT_WPDMA_DEBUG,
1497         FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
1498         MT_WPDMA_DEBUG_SEL);
1499 
1500     val = mt76_rr(dev, MT_WPDMA_DEBUG);
1501     return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
1502 }
1503 
1504 static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
1505 {
1506     if (is_mt7628(dev))
1507         return mt7603_dma_debug(dev, 9) & BIT(9);
1508 
1509     return mt7603_dma_debug(dev, 2) & BIT(8);
1510 }
1511 
1512 static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
1513 {
1514     if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
1515         return false;
1516 
1517     return mt7603_rx_fifo_busy(dev);
1518 }
1519 
1520 static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
1521 {
1522     u32 val;
1523 
1524     if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
1525         return false;
1526 
1527     val = mt7603_dma_debug(dev, 9);
1528     return (val & BIT(8)) && (val & 0xf) != 0xf;
1529 }
1530 
1531 static bool mt7603_tx_hang(struct mt7603_dev *dev)
1532 {
1533     struct mt76_queue *q;
1534     u32 dma_idx, prev_dma_idx;
1535     int i;
1536 
1537     for (i = 0; i < 4; i++) {
1538         q = dev->mphy.q_tx[i];
1539 
1540         if (!q->queued)
1541             continue;
1542 
1543         prev_dma_idx = dev->tx_dma_idx[i];
1544         dma_idx = readl(&q->regs->dma_idx);
1545         dev->tx_dma_idx[i] = dma_idx;
1546 
1547         if (dma_idx == prev_dma_idx &&
1548             dma_idx != readl(&q->regs->cpu_idx))
1549             break;
1550     }
1551 
1552     return i < 4;
1553 }
1554 
1555 static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
1556 {
1557     u32 addr, val;
1558 
1559     if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
1560         return true;
1561 
1562     if (mt7603_rx_fifo_busy(dev))
1563         return false;
1564 
1565     addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
1566     mt76_wr(dev, addr, 3);
1567     val = mt76_rr(dev, addr) >> 16;
1568 
1569     if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
1570         return true;
1571 
1572     return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
1573 }
1574 
1575 static bool
1576 mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
1577               enum mt7603_reset_cause cause,
1578               bool (*check)(struct mt7603_dev *dev))
1579 {
1580     if (dev->reset_test == cause + 1) {
1581         dev->reset_test = 0;
1582         goto trigger;
1583     }
1584 
1585     if (check) {
1586         if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
1587             *counter = 0;
1588             return false;
1589         }
1590 
1591         (*counter)++;
1592     }
1593 
1594     if (*counter < MT7603_WATCHDOG_TIMEOUT)
1595         return false;
1596 trigger:
1597     dev->cur_reset_cause = cause;
1598     dev->reset_cause[cause]++;
1599     return true;
1600 }
1601 
1602 void mt7603_update_channel(struct mt76_phy *mphy)
1603 {
1604     struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
1605     struct mt76_channel_state *state;
1606 
1607     state = mphy->chan_state;
1608     state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
1609 }
1610 
1611 void
1612 mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
1613 {
1614     u32 rxtd_6 = 0xd7c80000;
1615 
1616     if (val == dev->ed_strict_mode)
1617         return;
1618 
1619     dev->ed_strict_mode = val;
1620 
1621     /* Ensure that ED/CCA does not trigger if disabled */
1622     if (!dev->ed_monitor)
1623         rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
1624     else
1625         rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);
1626 
1627     if (dev->ed_monitor && !dev->ed_strict_mode)
1628         rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
1629     else
1630         rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);
1631 
1632     mt76_wr(dev, MT_RXTD(6), rxtd_6);
1633 
1634     mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
1635                dev->ed_monitor && !dev->ed_strict_mode);
1636 }
1637 
1638 static void
1639 mt7603_edcca_check(struct mt7603_dev *dev)
1640 {
1641     u32 val = mt76_rr(dev, MT_AGC(41));
1642     ktime_t cur_time;
1643     int rssi0, rssi1;
1644     u32 active;
1645     u32 ed_busy;
1646 
1647     if (!dev->ed_monitor)
1648         return;
1649 
1650     rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
1651     if (rssi0 > 128)
1652         rssi0 -= 256;
1653 
1654     if (dev->mphy.antenna_mask & BIT(1)) {
1655         rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
1656         if (rssi1 > 128)
1657             rssi1 -= 256;
1658     } else {
1659         rssi1 = rssi0;
1660     }
1661 
1662     if (max(rssi0, rssi1) >= -40 &&
1663         dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
1664         dev->ed_strong_signal++;
1665     else if (dev->ed_strong_signal > 0)
1666         dev->ed_strong_signal--;
1667 
1668     cur_time = ktime_get_boottime();
1669     ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;
1670 
1671     active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
1672     dev->ed_time = cur_time;
1673 
1674     if (!active)
1675         return;
1676 
1677     if (100 * ed_busy / active > 90) {
1678         if (dev->ed_trigger < 0)
1679             dev->ed_trigger = 0;
1680         dev->ed_trigger++;
1681     } else {
1682         if (dev->ed_trigger > 0)
1683             dev->ed_trigger = 0;
1684         dev->ed_trigger--;
1685     }
1686 
1687     if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
1688         dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
1689         mt7603_edcca_set_strict(dev, true);
1690     } else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
1691         mt7603_edcca_set_strict(dev, false);
1692     }
1693 
1694     if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
1695         dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
1696     else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
1697         dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
1698 }
1699 
1700 void mt7603_cca_stats_reset(struct mt7603_dev *dev)
1701 {
1702     mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
1703     mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
1704     mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
1705 }
1706 
1707 static void
1708 mt7603_adjust_sensitivity(struct mt7603_dev *dev)
1709 {
1710     u32 agc0 = dev->agc0, agc3 = dev->agc3;
1711     u32 adj;
1712 
1713     if (!dev->sensitivity || dev->sensitivity < -100) {
1714         dev->sensitivity = 0;
1715     } else if (dev->sensitivity <= -84) {
1716         adj = 7 + (dev->sensitivity + 92) / 2;
1717 
1718         agc0 = 0x56f0076f;
1719         agc0 |= adj << 12;
1720         agc0 |= adj << 16;
1721         agc3 = 0x81d0d5e3;
1722     } else if (dev->sensitivity <= -72) {
1723         adj = 7 + (dev->sensitivity + 80) / 2;
1724 
1725         agc0 = 0x6af0006f;
1726         agc0 |= adj << 8;
1727         agc0 |= adj << 12;
1728         agc0 |= adj << 16;
1729 
1730         agc3 = 0x8181d5e3;
1731     } else {
1732         if (dev->sensitivity > -54)
1733             dev->sensitivity = -54;
1734 
1735         adj = 7 + (dev->sensitivity + 80) / 2;
1736 
1737         agc0 = 0x7ff0000f;
1738         agc0 |= adj << 4;
1739         agc0 |= adj << 8;
1740         agc0 |= adj << 12;
1741         agc0 |= adj << 16;
1742 
1743         agc3 = 0x818181e3;
1744     }
1745 
1746     mt76_wr(dev, MT_AGC(0), agc0);
1747     mt76_wr(dev, MT_AGC1(0), agc0);
1748 
1749     mt76_wr(dev, MT_AGC(3), agc3);
1750     mt76_wr(dev, MT_AGC1(3), agc3);
1751 }
1752 
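/* Dynamic sensitivity adjustment: derive false-CCA counts from the PD vs MDRDY
 * counters and raise or lower the AGC sensitivity accordingly, bounded by the
 * minimum average RSSI of the connected stations.
 */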
1753 static void
1754 mt7603_false_cca_check(struct mt7603_dev *dev)
1755 {
1756     int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
1757     int false_cca;
1758     int min_signal;
1759     u32 val;
1760 
1761     if (!dev->dynamic_sensitivity)
1762         return;
1763 
1764     val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
1765     pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
1766     pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);
1767 
1768     val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
1769     mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
1770     mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);
1771 
1772     dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
1773     dev->false_cca_cck = pd_cck - mdrdy_cck;
1774 
1775     mt7603_cca_stats_reset(dev);
1776 
1777     min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
1778     if (!min_signal) {
1779         dev->sensitivity = 0;
1780         dev->last_cca_adj = jiffies;
1781         goto out;
1782     }
1783 
1784     min_signal -= 15;
1785 
1786     false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
1787     if (false_cca > 600 &&
1788         dev->sensitivity < -100 + dev->sensitivity_limit) {
1789         if (!dev->sensitivity)
1790             dev->sensitivity = -92;
1791         else
1792             dev->sensitivity += 2;
1793         dev->last_cca_adj = jiffies;
1794     } else if (false_cca < 100 ||
1795            time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
1796         dev->last_cca_adj = jiffies;
1797         if (!dev->sensitivity)
1798             goto out;
1799 
1800         dev->sensitivity -= 2;
1801     }
1802 
1803     if (dev->sensitivity && dev->sensitivity > min_signal) {
1804         dev->sensitivity = min_signal;
1805         dev->last_cca_adj = jiffies;
1806     }
1807 
1808 out:
1809     mt7603_adjust_sensitivity(dev);
1810 }
1811 
1812 void mt7603_mac_work(struct work_struct *work)
1813 {
1814     struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
1815                           mphy.mac_work.work);
1816     bool reset = false;
1817     int i, idx;
1818 
1819     mt76_tx_status_check(&dev->mt76, false);
1820 
1821     mutex_lock(&dev->mt76.mutex);
1822 
1823     dev->mphy.mac_work_count++;
1824     mt76_update_survey(&dev->mphy);
1825     mt7603_edcca_check(dev);
1826 
1827     for (i = 0, idx = 0; i < 2; i++) {
1828         u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
1829 
1830         dev->mt76.aggr_stats[idx++] += val & 0xffff;
1831         dev->mt76.aggr_stats[idx++] += val >> 16;
1832     }
1833 
1834     if (dev->mphy.mac_work_count == 10)
1835         mt7603_false_cca_check(dev);
1836 
1837     if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
1838                   RESET_CAUSE_RX_PSE_BUSY,
1839                   mt7603_rx_pse_busy) ||
1840         mt7603_watchdog_check(dev, &dev->beacon_check,
1841                   RESET_CAUSE_BEACON_STUCK,
1842                   NULL) ||
1843         mt7603_watchdog_check(dev, &dev->tx_hang_check,
1844                   RESET_CAUSE_TX_HANG,
1845                   mt7603_tx_hang) ||
1846         mt7603_watchdog_check(dev, &dev->tx_dma_check,
1847                   RESET_CAUSE_TX_BUSY,
1848                   mt7603_tx_dma_busy) ||
1849         mt7603_watchdog_check(dev, &dev->rx_dma_check,
1850                   RESET_CAUSE_RX_BUSY,
1851                   mt7603_rx_dma_busy) ||
1852         mt7603_watchdog_check(dev, &dev->mcu_hang,
1853                   RESET_CAUSE_MCU_HANG,
1854                   NULL) ||
1855         dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
1856         dev->beacon_check = 0;
1857         dev->tx_dma_check = 0;
1858         dev->tx_hang_check = 0;
1859         dev->rx_dma_check = 0;
1860         dev->rx_pse_check = 0;
1861         dev->mcu_hang = 0;
1862         dev->rx_dma_idx = ~0;
1863         memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
1864         reset = true;
1865         dev->mphy.mac_work_count = 0;
1866     }
1867 
1868     if (dev->mphy.mac_work_count >= 10)
1869         dev->mphy.mac_work_count = 0;
1870 
1871     mutex_unlock(&dev->mt76.mutex);
1872 
1873     if (reset)
1874         mt7603_mac_watchdog_reset(dev);
1875 
1876     ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1877                      msecs_to_jiffies(MT7603_WATCHDOG_TIME));
1878 }