0001
0002
0003
0004
0005
0006
0007
0008 #include "core.h"
0009 #include "txrx.h"
0010 #include "htt.h"
0011 #include "mac.h"
0012 #include "debug.h"
0013
0014 static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
0015 {
0016 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
0017
0018 if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
0019 return;
0020
0021 if (ath10k_mac_tx_frm_has_freq(ar))
0022 return;
0023
0024
0025
0026
0027
0028
0029 spin_lock_bh(&ar->data_lock);
0030 if (ar->offchan_tx_skb != skb) {
0031 ath10k_warn(ar, "completed old offchannel frame\n");
0032 goto out;
0033 }
0034
0035 complete(&ar->offchan_tx_completed);
0036 ar->offchan_tx_skb = NULL;
0037
0038 ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
0039 out:
0040 spin_unlock_bh(&ar->data_lock);
0041 }
0042
/* Handle an HTT tx-completion event from firmware.
 *
 * Looks up the originating msdu by @tx_done->msdu_id, releases the id and
 * per-queue/per-device pending-tx accounting, translates the firmware
 * completion status into mac80211 tx-status flags and hands the frame back
 * to mac80211 via ieee80211_tx_status_ext().
 *
 * Returns 0 on success, -EINVAL if the msdu_id is out of range, or
 * -ENOENT if no pending frame matches the id.
 */
int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
			 const struct htt_tx_done *tx_done)
{
	struct ieee80211_tx_status status;
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info;
	struct ieee80211_txq *txq;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_txq *artxq;
	struct sk_buff *msdu;
	u8 flags;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx completion msdu_id %u status %d\n",
		   tx_done->msdu_id, tx_done->status);

	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
			    tx_done->msdu_id);
		return -EINVAL;
	}

	spin_lock_bh(&htt->tx_lock);
	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
	if (!msdu) {
		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
			    tx_done->msdu_id);
		spin_unlock_bh(&htt->tx_lock);
		return -ENOENT;
	}

	skb_cb = ATH10K_SKB_CB(msdu);
	txq = skb_cb->txq;

	/* One fewer frame of this queue is in flight in firmware. */
	if (txq) {
		artxq = (void *)txq->drv_priv;
		artxq->num_fw_queued--;
	}

	/* Cache the skb flags before releasing the msdu_id; both the id free
	 * and the pending-tx decrement must happen under tx_lock.
	 */
	flags = skb_cb->flags;
	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
	ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);

	/* txq->sta is RCU-protected; report the airtime estimated at tx time. */
	rcu_read_lock();
	if (txq && txq->sta && skb_cb->airtime_est)
		ieee80211_sta_register_airtime(txq->sta, txq->tid,
					       skb_cb->airtime_est, 0);
	rcu_read_unlock();

	/* High-latency (e.g. SDIO/USB) devices do not DMA-map tx buffers. */
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));
	info->status.rates[0].idx = -1;

	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);

	/* Assume the frame was ACKed unless it was sent as no-ack ... */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	    !(flags & ATH10K_SKB_F_NOACK_TID))
		info->flags |= IEEE80211_TX_STAT_ACK;

	/* ... or firmware explicitly reported no ACK was received. */
	if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	/* For no-ack frames, an ACK completion means "was transmitted". */
	if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
	    ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
	     (flags & ATH10K_SKB_F_NOACK_TID)))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	/* Discarded frames report neither success flavor. */
	if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
		if ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
		    (flags & ATH10K_SKB_F_NOACK_TID))
			info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags &= ~IEEE80211_TX_STAT_ACK;
	}

	/* ack_rssi is reported relative to the noise floor. */
	if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
	    tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
		info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
						tx_done->ack_rssi;
		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	memset(&status, 0, sizeof(status));
	status.skb = msdu;
	status.info = info;

	rcu_read_lock();

	if (txq)
		status.sta = txq->sta;

	/* Hands the skb (and ownership) back to mac80211. */
	ieee80211_tx_status_ext(htt->ar->hw, &status);

	rcu_read_unlock();

	return 0;
}
0149
0150 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
0151 const u8 *addr)
0152 {
0153 struct ath10k_peer *peer;
0154
0155 lockdep_assert_held(&ar->data_lock);
0156
0157 list_for_each_entry(peer, &ar->peers, list) {
0158 if (peer->vdev_id != vdev_id)
0159 continue;
0160 if (!ether_addr_equal(peer->addr, addr))
0161 continue;
0162
0163 return peer;
0164 }
0165
0166 return NULL;
0167 }
0168
0169 struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
0170 {
0171 struct ath10k_peer *peer;
0172
0173 if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
0174 return NULL;
0175
0176 lockdep_assert_held(&ar->data_lock);
0177
0178 list_for_each_entry(peer, &ar->peers, list)
0179 if (test_bit(peer_id, peer->peer_ids))
0180 return peer;
0181
0182 return NULL;
0183 }
0184
/* Wait (up to 3 s) until a peer's mapped state matches @expect_mapped.
 *
 * Used to synchronize with firmware peer map/unmap events after a peer
 * create/delete command. A crash-flush in progress also ends the wait so
 * callers do not stall across a firmware recovery.
 *
 * Returns 0 on success, -ETIMEDOUT if the state was not reached in time.
 */
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	long time_left;

	/* The condition is a GNU statement expression; its last expression
	 * is the value wait_event_timeout() tests on each wakeup.
	 */
	time_left = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);

			(mapped == expect_mapped ||
			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
		}), 3 * HZ);

	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}
0206
/* Wait until firmware reports the peer as mapped (created). */
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}
0211
/* Wait until firmware reports the peer as unmapped (deleted). */
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}
0216
/* Handle an HTT peer-map event: associate firmware peer_id with a peer.
 *
 * A single (vdev_id, addr) peer may accumulate several firmware peer ids,
 * tracked in its peer_ids bitmap. The peer is allocated here on first map
 * and waiters on ar->peer_mapping_wq are woken.
 */
void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
		ath10k_warn(ar,
			    "received htt peer map event with idx out of bounds: %u\n",
			    ev->peer_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
	if (!peer) {
		/* GFP_ATOMIC: allocating under a spinlock. */
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = ev->vdev_id;
		ether_addr_copy(peer->addr, ev->addr);
		list_add(&peer->list, &ar->peers);
		wake_up(&ar->peer_mapping_wq);
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   ev->vdev_id, ev->addr, ev->peer_id);

	/* The id slot should be free or already owned by this peer. */
	WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
	ar->peer_map[ev->peer_id] = peer;
	set_bit(ev->peer_id, peer->peer_ids);
exit:
	spin_unlock_bh(&ar->data_lock);
}
0252
/* Handle an HTT peer-unmap event: release firmware peer_id from its peer.
 *
 * Clears the id from the peer's bitmap and the peer_map lookup table; once
 * the last id is gone the peer itself is freed and waiters on
 * ar->peer_mapping_wq are woken.
 */
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
		ath10k_warn(ar,
			    "received htt peer unmap event with idx out of bounds: %u\n",
			    ev->peer_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
	if (!peer) {
		ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
			    ev->peer_id);
		goto exit;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, ev->peer_id);

	ar->peer_map[ev->peer_id] = NULL;
	clear_bit(ev->peer_id, peer->peer_ids);

	/* Last firmware id gone — the peer object is no longer referenced. */
	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ar->peer_mapping_wq);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}