#include "mt76.h"

static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
{
	/* Shorter reorder timeout for the higher-priority TIDs (4-7, i.e.
	 * the video/voice access categories): HZ / 25 (~40 ms) instead of
	 * HZ / 10 (100 ms).
	 */
	return HZ / (tidno >= 4 ? 25 : 10);
}

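/* Advance the reorder window head by one slot and, if a frame is buffered
 * at @idx, move it to @frames. Caller must hold tid->lock.
 */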
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}

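/* Release all buffered frames with a sequence number before @head,
 * advancing the window head accordingly. Caller must hold tid->lock.
 */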
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}

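/* Flush the run of consecutive frames buffered at the current window
 * head, stopping at the first missing slot. Caller must hold tid->lock.
 */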
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}

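/* Timeout handling helper: release every frame that has been buffered
 * longer than the reorder timeout, together with the frames ordered
 * before it, then flush whatever is now contiguous at the window head.
 * Caller must hold tid->lock.
 */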
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after32(jiffies,
				  status->reorder_time +
				  mt76_aggr_tid_to_timeo(tid->num)))
			continue;

		/* Frame held past its reorder timeout: advance the window
		 * head up to it, releasing everything queued before it.
		 */
		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}

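/* Periodic reorder work for one TID: flush timed-out frames to
 * mt76_rx_complete() and re-arm itself as long as frames stay buffered.
 */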
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     mt76_aggr_tid_to_timeo(tid->num));
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}

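/* Handle BlockAckReq control frames: move the reorder window to the
 * starting sequence number announced in the BAR and release everything
 * buffered before it.
 */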
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u8 tidno;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	/* The TID is carried in bits 15:12 of the BAR control field */
	status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	if (!tid->stopped) {
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}

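/* RX reorder entry point. The frame is appended to @frames by default;
 * frames that belong to a BA session are unlinked again and buffered in
 * the reorder window until they can be released in sequence order.
 */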
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size, idx;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	u8 ackp;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr) {
		if (!(status->flag & RX_FLAG_8023))
			mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* Frames sent with the no-ack policy are not part of a BA session */
	ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
		return;

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	/* In-order frame: pass it through and flush what is now contiguous */
	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/* The sequence number is ahead of the buffering window: move the
	 * window forward and release older frames to make room.
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the slot is already in use (duplicate frame) */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     mt76_aggr_tid_to_timeo(tid->num));

out:
	spin_unlock_bh(&tid->lock);
}

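/* Set up RX reordering for @tidno, with the window starting at @ssn and
 * holding @size frames. Any existing session for this TID is torn down
 * first. A typical caller is a driver's ampdu_action handler; roughly
 * (a sketch only, the msta/dev member names vary between drivers):
 *
 *	case IEEE80211_AMPDU_RX_START:
 *		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, params->tid,
 *				   params->ssn, params->buf_size);
 *		break;
 */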
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u16 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	tid->num = tidno;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);

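/* Tear down one reorder session: mark it stopped so no new frames are
 * buffered, free everything still held in the reorder buffer and wait
 * for the reorder work to finish.
 */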
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u16 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->reorder_buf[i] = NULL;
		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);

	/* The reorder work takes tid->lock itself, so it can only be
	 * cancelled synchronously after the lock has been dropped.
	 */
	cancel_delayed_work_sync(&tid->reorder_work);
}

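/* Detach and free the reorder state for @tidno. The RCU pointer swap is
 * annotated as running under dev->mutex; the structure itself is freed
 * only after a grace period via kfree_rcu().
 */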
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid = NULL;

	tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
				  lockdep_is_held(&dev->mutex));
	if (tid) {
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);