// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

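/*
 * Encode a tx queue depth for the firmware's queue state record. The
 * returned byte packs an exponent and a factor such that the depth is
 * approximately factor * 128 * 8^exp bytes. For example, a backlog of
 * 100000 bytes encodes as factor = 12, exp = 2, i.e. about 98304 bytes;
 * values too large to encode with exp <= 3 saturate to 0xff.
 */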
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
    int exp;
    int factor;

    exp = 0;
    factor = count >> 7;

    while (factor >= 64 && exp < 4) {
        factor >>= 3;
        exp++;
    }

    if (exp == 4)
        return 0xff;

    if (count > 0)
        factor = max(1, factor);

    return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
           SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

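/*
 * Refresh the shared queue state snapshot for one txq: store the encoded
 * byte count in count[tid][peer_id] and set (or clear, when the queue is
 * empty) the peer's bit in the per-tid peer map. This only matters in
 * push-pull mode, where the firmware reads this host-memory structure
 * when deciding which queues to fetch frames from.
 */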
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                       struct ieee80211_txq *txq)
{
    struct ath10k *ar = hw->priv;
    struct ath10k_sta *arsta;
    struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
    unsigned long frame_cnt;
    unsigned long byte_cnt;
    int idx;
    u32 bit;
    u16 peer_id;
    u8 tid;
    u8 count;

    lockdep_assert_held(&ar->htt.tx_lock);

    if (!ar->htt.tx_q_state.enabled)
        return;

    if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
        return;

    if (txq->sta) {
        arsta = (void *)txq->sta->drv_priv;
        peer_id = arsta->peer_id;
    } else {
        peer_id = arvif->peer_id;
    }

    tid = txq->tid;
    bit = BIT(peer_id % 32);
    idx = peer_id / 32;

    ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
    count = ath10k_htt_tx_txq_calc_size(byte_cnt);

    if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
        unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
        ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
                peer_id, tid);
        return;
    }

    ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
    ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
    ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
           peer_id, tid, count);
}

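/*
 * Publish the snapshot maintained by __ath10k_htt_tx_txq_recalc(): bump
 * the sequence number so the firmware can spot a new revision, then sync
 * the whole structure out to the device.
 */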
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
    u32 seq;
    size_t size;

    lockdep_assert_held(&ar->htt.tx_lock);

    if (!ar->htt.tx_q_state.enabled)
        return;

    if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
        return;

    seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
    seq++;
    ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
           seq);

    size = sizeof(*ar->htt.tx_q_state.vaddr);
    dma_sync_single_for_device(ar->dev,
                   ar->htt.tx_q_state.paddr,
                   size,
                   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                  struct ieee80211_txq *txq)
{
    struct ath10k *ar = hw->priv;

    spin_lock_bh(&ar->htt.tx_lock);
    __ath10k_htt_tx_txq_recalc(hw, txq);
    spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
    spin_lock_bh(&ar->htt.tx_lock);
    __ath10k_htt_tx_txq_sync(ar);
    spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
                  struct ieee80211_txq *txq)
{
    struct ath10k *ar = hw->priv;

    spin_lock_bh(&ar->htt.tx_lock);
    __ath10k_htt_tx_txq_recalc(hw, txq);
    __ath10k_htt_tx_txq_sync(ar);
    spin_unlock_bh(&ar->htt.tx_lock);
}

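/*
 * The num_pending_tx counter implements a simple flow control scheme:
 * mac80211 queues are paused with ATH10K_TX_PAUSE_Q_FULL once the count
 * reaches max_num_pending_tx and unpaused as soon as it drops below the
 * limit again. Waiters on empty_tx_wq are woken when the count reaches
 * zero.
 */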
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
    lockdep_assert_held(&htt->tx_lock);

    htt->num_pending_tx--;
    if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
        ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

    if (htt->num_pending_tx == 0)
        wake_up(&htt->empty_tx_wq);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
    lockdep_assert_held(&htt->tx_lock);

    if (htt->num_pending_tx >= htt->max_num_pending_tx)
        return -EBUSY;

    htt->num_pending_tx++;
    if (htt->num_pending_tx == htt->max_num_pending_tx)
        ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

    return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
                   bool is_presp)
{
    struct ath10k *ar = htt->ar;

    lockdep_assert_held(&htt->tx_lock);

    if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
        return 0;

    if (is_presp &&
        ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
        return -EBUSY;

    htt->num_pending_mgmt_tx++;

    return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
    lockdep_assert_held(&htt->tx_lock);

    if (!htt->ar->hw_params.max_probe_resp_desc_thres)
        return;

    htt->num_pending_mgmt_tx--;
}

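/*
 * msdu ids come from an IDR keyed 0..max_num_pending_tx-1 with the skb
 * stored as the value; the id doubles as the index into the contiguous
 * txbuf and fragment descriptor arrays used by the tx paths below.
 */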
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
    struct ath10k *ar = htt->ar;
    int ret;

    spin_lock_bh(&htt->tx_lock);
    ret = idr_alloc(&htt->pending_tx, skb, 0,
            htt->max_num_pending_tx, GFP_ATOMIC);
    spin_unlock_bh(&htt->tx_lock);

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

    return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
    struct ath10k *ar = htt->ar;

    lockdep_assert_held(&htt->tx_lock);

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);

    idr_remove(&htt->pending_tx, msdu_id);
}

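/*
 * The per-msdu HTT tx descriptors live in a single DMA-coherent array of
 * max_num_pending_tx entries, indexed by msdu id. Separate 32 and 64 bit
 * variants exist for targets with different bus address widths; the
 * matching alloc/free pair is picked through the htt tx ops.
 */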
static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    size_t size;

    if (!htt->txbuf.vaddr_txbuff_32)
        return;

    size = htt->txbuf.size;
    dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
              htt->txbuf.paddr);
    htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    size_t size;

    size = htt->max_num_pending_tx *
            sizeof(struct ath10k_htt_txbuf_32);

    htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
                            &htt->txbuf.paddr,
                            GFP_KERNEL);
    if (!htt->txbuf.vaddr_txbuff_32)
        return -ENOMEM;

    htt->txbuf.size = size;

    return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    size_t size;

    if (!htt->txbuf.vaddr_txbuff_64)
        return;

    size = htt->txbuf.size;
    dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
              htt->txbuf.paddr);
    htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    size_t size;

    size = htt->max_num_pending_tx *
            sizeof(struct ath10k_htt_txbuf_64);

    htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
                            &htt->txbuf.paddr,
                            GFP_KERNEL);
    if (!htt->txbuf.vaddr_txbuff_64)
        return -ENOMEM;

    htt->txbuf.size = size;

    return 0;
}

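/*
 * With continuous fragment descriptors the MSDU extension descriptors
 * (one per pending msdu) are allocated up front as a flat array and
 * later advertised to the firmware via the frag desc bank cfg message,
 * avoiding per-frame fragment list allocations.
 */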
static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
    size_t size;

    if (!htt->frag_desc.vaddr_desc_32)
        return;

    size = htt->max_num_pending_tx *
            sizeof(struct htt_msdu_ext_desc);

    dma_free_coherent(htt->ar->dev,
              size,
              htt->frag_desc.vaddr_desc_32,
              htt->frag_desc.paddr);

    htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    size_t size;

    if (!ar->hw_params.continuous_frag_desc)
        return 0;

    size = htt->max_num_pending_tx *
            sizeof(struct htt_msdu_ext_desc);
    htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
                              &htt->frag_desc.paddr,
                              GFP_KERNEL);
    if (!htt->frag_desc.vaddr_desc_32) {
        ath10k_err(ar, "failed to alloc fragment desc memory\n");
        return -ENOMEM;
    }
    htt->frag_desc.size = size;

    return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
    size_t size;

    if (!htt->frag_desc.vaddr_desc_64)
        return;

    size = htt->max_num_pending_tx *
            sizeof(struct htt_msdu_ext_desc_64);

    dma_free_coherent(htt->ar->dev,
              size,
              htt->frag_desc.vaddr_desc_64,
              htt->frag_desc.paddr);

    htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    size_t size;

    if (!ar->hw_params.continuous_frag_desc)
        return 0;

    size = htt->max_num_pending_tx *
            sizeof(struct htt_msdu_ext_desc_64);

    htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
                              &htt->frag_desc.paddr,
                              GFP_KERNEL);
    if (!htt->frag_desc.vaddr_desc_64) {
        ath10k_err(ar, "failed to alloc fragment desc memory\n");
        return -ENOMEM;
    }
    htt->frag_desc.size = size;

    return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    size_t size;

    if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
              ar->running_fw->fw_file.fw_features))
        return;

    size = sizeof(*htt->tx_q_state.vaddr);

    dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
    kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    size_t size;
    int ret;

    if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
              ar->running_fw->fw_file.fw_features))
        return 0;

    htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
    htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
    htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

    size = sizeof(*htt->tx_q_state.vaddr);
    htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
    if (!htt->tx_q_state.vaddr)
        return -ENOMEM;

    htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
                           size, DMA_TO_DEVICE);
    ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
    if (ret) {
        ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
        kfree(htt->tx_q_state.vaddr);
        return -EIO;
    }

    return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
    WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
    kfifo_free(&htt->txdone_fifo);
}

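/* kfifo sizes must be a power of two, hence the explicit roundup. */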
static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
    int ret;
    size_t size;

    size = roundup_pow_of_two(htt->max_num_pending_tx);
    ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
    return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    int ret;

    ret = ath10k_htt_alloc_txbuff(htt);
    if (ret) {
        ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
        return ret;
    }

    ret = ath10k_htt_alloc_frag_desc(htt);
    if (ret) {
        ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
        goto free_txbuf;
    }

    ret = ath10k_htt_tx_alloc_txq(htt);
    if (ret) {
        ath10k_err(ar, "failed to alloc txq: %d\n", ret);
        goto free_frag_desc;
    }

    ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
    if (ret) {
        ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
        goto free_txq;
    }

    return 0;

free_txq:
    ath10k_htt_tx_free_txq(htt);

free_frag_desc:
    ath10k_htt_free_frag_desc(htt);

free_txbuf:
    ath10k_htt_free_txbuff(htt);

    return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    int ret;

    ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
           htt->max_num_pending_tx);

    spin_lock_init(&htt->tx_lock);
    idr_init(&htt->pending_tx);

    if (htt->tx_mem_allocated)
        return 0;

    if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
        return 0;

    ret = ath10k_htt_tx_alloc_buf(htt);
    if (ret)
        goto free_idr_pending_tx;

    htt->tx_mem_allocated = true;

    return 0;

free_idr_pending_tx:
    idr_destroy(&htt->pending_tx);

    return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
    struct ath10k *ar = ctx;
    struct ath10k_htt *htt = &ar->htt;
    struct htt_tx_done tx_done = {0};

    ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);

    tx_done.msdu_id = msdu_id;
    tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

    ath10k_txrx_tx_unref(htt, &tx_done);

    return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
    if (!htt->tx_mem_allocated)
        return;

    ath10k_htt_free_txbuff(htt);
    ath10k_htt_tx_free_txq(htt);
    ath10k_htt_free_frag_desc(htt);
    ath10k_htt_tx_free_txdone_fifo(htt);
    htt->tx_mem_allocated = false;
}

static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
    ath10k_htc_stop_hl(htt->ar);
    idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
    ath10k_htt_flush_tx_queue(htt);
    idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
    ath10k_htt_tx_stop(htt);
    ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
    queue_work(ar->workqueue, &ar->bundle_tx_work);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
    struct ath10k_htt *htt = &ar->htt;
    struct htt_tx_done tx_done = {0};
    struct htt_cmd_hdr *htt_hdr;
    struct htt_data_tx_desc *desc_hdr = NULL;
    u16 flags1 = 0;
    u8 msg_type = 0;

    if (htt->disable_tx_comp) {
        htt_hdr = (struct htt_cmd_hdr *)skb->data;
        msg_type = htt_hdr->msg_type;

        if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
            desc_hdr = (struct htt_data_tx_desc *)
                (skb->data + sizeof(*htt_hdr));
            flags1 = __le16_to_cpu(desc_hdr->flags1);
            skb_pull(skb, sizeof(struct htt_cmd_hdr));
            skb_pull(skb, sizeof(struct htt_data_tx_desc));
        }
    }

    dev_kfree_skb_any(skb);

    if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
        return;

    ath10k_dbg(ar, ATH10K_DBG_HTT,
           "htt tx complete msdu id %u flags1 %x\n",
           __le16_to_cpu(desc_hdr->id), flags1);

    if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
        return;

    tx_done.status = HTT_TX_COMPL_STATE_ACK;
    tx_done.msdu_id = __le16_to_cpu(desc_hdr->id);
    ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
    dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

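/*
 * The H2T control messages below all follow the same pattern: allocate
 * an HTC skb sized for the command, skb_put() the full length, fill in
 * the htt_cmd payload and pass it to ath10k_htc_send(). On failure the
 * sender still owns the skb and must free it.
 */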
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    int len = 0;
    int ret;

    len += sizeof(cmd->hdr);
    len += sizeof(cmd->ver_req);

    skb = ath10k_htc_alloc_skb(ar, len);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, len);
    cmd = (struct htt_cmd *)skb->data;
    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
                 u64 cookie)
{
    struct ath10k *ar = htt->ar;
    struct htt_stats_req *req;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    int len = 0, ret;

    len += sizeof(cmd->hdr);
    len += sizeof(cmd->stats_req);

    skb = ath10k_htc_alloc_skb(ar, len);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, len);
    cmd = (struct htt_cmd *)skb->data;
    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

    req = &cmd->stats_req;

    memset(req, 0, sizeof(*req));

    /* currently we support only masks up to 24 bits wide, so there is
     * no need to worry about endianness
     */
    memcpy(req->upload_types, &mask, 3);
    memcpy(req->reset_types, &reset_mask, 3);
    req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
    req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
    req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        ath10k_warn(ar, "failed to send htt type stats request: %d",
                ret);
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

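/*
 * Point the firmware at the pre-allocated fragment descriptor bank: a
 * single bank covering msdu ids 0..max_num_pending_tx-1, plus the
 * location and geometry of the tx queue state structure when peer flow
 * control is in use.
 */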
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    struct htt_frag_desc_bank_cfg32 *cfg;
    int ret, size;
    u8 info;

    if (!ar->hw_params.continuous_frag_desc)
        return 0;

    if (!htt->frag_desc.paddr) {
        ath10k_warn(ar, "invalid frag desc memory\n");
        return -EINVAL;
    }

    size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
    skb = ath10k_htc_alloc_skb(ar, size);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, size);
    cmd = (struct htt_cmd *)skb->data;
    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

    info = 0;
    info |= SM(htt->tx_q_state.type,
           HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

    if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
             ar->running_fw->fw_file.fw_features))
        info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

    cfg = &cmd->frag_desc_bank_cfg32;
    cfg->info = info;
    cfg->num_banks = 1;
    cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
    cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
    cfg->bank_id[0].bank_min_id = 0;
    cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                            1);

    cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
    cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
    cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
    cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
    cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                ret);
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    struct htt_frag_desc_bank_cfg64 *cfg;
    int ret, size;
    u8 info;

    if (!ar->hw_params.continuous_frag_desc)
        return 0;

    if (!htt->frag_desc.paddr) {
        ath10k_warn(ar, "invalid frag desc memory\n");
        return -EINVAL;
    }

    size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
    skb = ath10k_htc_alloc_skb(ar, size);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, size);
    cmd = (struct htt_cmd *)skb->data;
    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

    info = 0;
    info |= SM(htt->tx_q_state.type,
           HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

    if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
             ar->running_fw->fw_file.fw_features))
        info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

    cfg = &cmd->frag_desc_bank_cfg64;
    cfg->info = info;
    cfg->num_banks = 1;
    cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
    cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
    cfg->bank_id[0].bank_min_id = 0;
    cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                            1);

    cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
    cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
    cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
    cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
    cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                ret);
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring)
{
    struct htt_rx_ring_setup_ring32 *ring =
            (struct htt_rx_ring_setup_ring32 *)rx_ring;

    ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}

static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring)
{
    struct htt_rx_ring_setup_ring64 *ring =
            (struct htt_rx_ring_setup_ring64 *)rx_ring;

    ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    struct ath10k_hw_params *hw = &ar->hw_params;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    struct htt_rx_ring_setup_ring32 *ring;
    const int num_rx_ring = 1;
    u16 flags;
    u32 fw_idx;
    int len;
    int ret;

    /*
     * the HW expects the buffer to be an integral number of 4-byte
     * "words"
     */
    BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
    BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

    len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
        + (sizeof(*ring) * num_rx_ring);
    skb = ath10k_htc_alloc_skb(ar, len);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, len);

    cmd = (struct htt_cmd *)skb->data;
    ring = &cmd->rx_setup_32.rings[0];

    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
    cmd->rx_setup_32.hdr.num_rings = 1;

    /* FIXME: do we need all of this? */
    flags = 0;
    flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
    flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
    flags |= HTT_RX_RING_FLAGS_PPDU_START;
    flags |= HTT_RX_RING_FLAGS_PPDU_END;
    flags |= HTT_RX_RING_FLAGS_MPDU_START;
    flags |= HTT_RX_RING_FLAGS_MPDU_END;
    flags |= HTT_RX_RING_FLAGS_MSDU_START;
    flags |= HTT_RX_RING_FLAGS_MSDU_END;
    flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
    flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
    flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
    flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
    flags |= HTT_RX_RING_FLAGS_CTRL_RX;
    flags |= HTT_RX_RING_FLAGS_MGMT_RX;
    flags |= HTT_RX_RING_FLAGS_NULL_RX;
    flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

    fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

    ring->fw_idx_shadow_reg_paddr =
        __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
    ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
    ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
    ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
    ring->flags = __cpu_to_le16(flags);
    ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

    ath10k_htt_fill_rx_desc_offset_32(hw, ring);
    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    struct ath10k_hw_params *hw = &ar->hw_params;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    struct htt_rx_ring_setup_ring64 *ring;
    const int num_rx_ring = 1;
    u16 flags;
    u32 fw_idx;
    int len;
    int ret;

    /* HW expects the buffer to be an integral number of 4-byte
     * "words"
     */
    BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
    BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

    len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
        + (sizeof(*ring) * num_rx_ring);
    skb = ath10k_htc_alloc_skb(ar, len);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, len);

    cmd = (struct htt_cmd *)skb->data;
    ring = &cmd->rx_setup_64.rings[0];

    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
    cmd->rx_setup_64.hdr.num_rings = 1;

    flags = 0;
    flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
    flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
    flags |= HTT_RX_RING_FLAGS_PPDU_START;
    flags |= HTT_RX_RING_FLAGS_PPDU_END;
    flags |= HTT_RX_RING_FLAGS_MPDU_START;
    flags |= HTT_RX_RING_FLAGS_MPDU_END;
    flags |= HTT_RX_RING_FLAGS_MSDU_START;
    flags |= HTT_RX_RING_FLAGS_MSDU_END;
    flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
    flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
    flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
    flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
    flags |= HTT_RX_RING_FLAGS_CTRL_RX;
    flags |= HTT_RX_RING_FLAGS_MGMT_RX;
    flags |= HTT_RX_RING_FLAGS_NULL_RX;
    flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

    fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

    ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
    ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
    ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
    ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
    ring->flags = __cpu_to_le16(flags);
    ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

    ath10k_htt_fill_rx_desc_offset_64(hw, ring);
    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    struct htt_rx_ring_setup_ring32 *ring;
    const int num_rx_ring = 1;
    u16 flags;
    int len;
    int ret;

    /*
     * the HW expects the buffer to be an integral number of 4-byte
     * "words"
     */
    BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
    BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

    len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
        + (sizeof(*ring) * num_rx_ring);
    skb = ath10k_htc_alloc_skb(ar, len);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, len);

    cmd = (struct htt_cmd *)skb->data;
    ring = &cmd->rx_setup_32.rings[0];

    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
    cmd->rx_setup_32.hdr.num_rings = 1;

    flags = 0;
    flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
    flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
    flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

    memset(ring, 0, sizeof(*ring));
    ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
    ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
    ring->flags = __cpu_to_le16(flags);

    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
                      u8 max_subfrms_ampdu,
                      u8 max_subfrms_amsdu)
{
    struct ath10k *ar = htt->ar;
    struct htt_aggr_conf *aggr_conf;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    int len;
    int ret;

    /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

    if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
        return -EINVAL;

    if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
        return -EINVAL;

    len = sizeof(cmd->hdr);
    len += sizeof(cmd->aggr_conf);

    skb = ath10k_htc_alloc_skb(ar, len);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, len);
    cmd = (struct htt_cmd *)skb->data;
    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

    aggr_conf = &cmd->aggr_conf;
    aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
    aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
           aggr_conf->max_num_amsdu_subframes,
           aggr_conf->max_num_ampdu_subframes);

    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
                      u8 max_subfrms_ampdu,
                      u8 max_subfrms_amsdu)
{
    struct ath10k *ar = htt->ar;
    struct htt_aggr_conf_v2 *aggr_conf;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    int len;
    int ret;

    /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

    if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
        return -EINVAL;

    if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
        return -EINVAL;

    len = sizeof(cmd->hdr);
    len += sizeof(cmd->aggr_conf_v2);

    skb = ath10k_htc_alloc_skb(ar, len);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, len);
    cmd = (struct htt_cmd *)skb->data;
    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

    aggr_conf = &cmd->aggr_conf_v2;
    aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
    aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
           aggr_conf->max_num_amsdu_subframes,
           aggr_conf->max_num_ampdu_subframes);

    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
                 __le32 token,
                 __le16 fetch_seq_num,
                 struct htt_tx_fetch_record *records,
                 size_t num_records)
{
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    const u16 resp_id = 0;
    int len = 0;
    int ret;

    /* Response IDs are echoed back only for the host driver's
     * convenience. They aren't used for anything in the driver yet so
     * use 0.
     */

    len += sizeof(cmd->hdr);
    len += sizeof(cmd->tx_fetch_resp);
    len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

    skb = ath10k_htc_alloc_skb(ar, len);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, len);
    cmd = (struct htt_cmd *)skb->data;
    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
    cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
    cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
    cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
    cmd->tx_fetch_resp.token = token;

    memcpy(cmd->tx_fetch_resp.records, records,
           sizeof(records[0]) * num_records);

    ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
    if (ret) {
        ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
        goto err_free_skb;
    }

    return 0;

err_free_skb:
    dev_kfree_skb_any(skb);

    return ret;
}

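/*
 * Pick the vdev a frame is transmitted on: off-channel frames use the
 * scan vdev, frames with a vif use that vif's vdev, monitor mode falls
 * back to the monitor vdev, and vdev 0 is the last resort.
 */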
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
    struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
    struct ath10k_vif *arvif;

    if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
        return ar->scan.vdev_id;
    } else if (cb->vif) {
        arvif = (void *)cb->vif->drv_priv;
        return arvif->vdev_id;
    } else if (ar->monitor_started) {
        return ar->monitor_vdev_id;
    } else {
        return 0;
    }
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
    struct ieee80211_hdr *hdr = (void *)skb->data;
    struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

    if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
        return HTT_DATA_TX_EXT_TID_MGMT;
    else if (cb->flags & ATH10K_SKB_F_QOS)
        return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
    else
        return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

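/*
 * Management tx via the HTT_H2T_MSG_TYPE_MGMT_TX command. Protected
 * robust management frames (action/deauth/disassoc) get tail room
 * reserved for the MIC here, since the frame is encrypted only after it
 * has been handed to the target.
 */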
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
    struct ath10k *ar = htt->ar;
    struct device *dev = ar->dev;
    struct sk_buff *txdesc = NULL;
    struct htt_cmd *cmd;
    struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
    u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
    int len = 0;
    int msdu_id = -1;
    int res;
    const u8 *peer_addr;
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

    len += sizeof(cmd->hdr);
    len += sizeof(cmd->mgmt_tx);

    res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
    if (res < 0)
        goto err;

    msdu_id = res;

    if ((ieee80211_is_action(hdr->frame_control) ||
         ieee80211_is_deauth(hdr->frame_control) ||
         ieee80211_is_disassoc(hdr->frame_control)) &&
         ieee80211_has_protected(hdr->frame_control)) {
        peer_addr = hdr->addr1;
        if (is_multicast_ether_addr(peer_addr)) {
            skb_put(msdu, sizeof(struct ieee80211_mmie_16));
        } else {
            if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
                skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
                skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
            else
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }
    }

    txdesc = ath10k_htc_alloc_skb(ar, len);
    if (!txdesc) {
        res = -ENOMEM;
        goto err_free_msdu_id;
    }

    skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                       DMA_TO_DEVICE);
    res = dma_mapping_error(dev, skb_cb->paddr);
    if (res) {
        res = -EIO;
        goto err_free_txdesc;
    }

    skb_put(txdesc, len);
    cmd = (struct htt_cmd *)txdesc->data;
    memset(cmd, 0, len);

    cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
    cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
    cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
    cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
    cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
    memcpy(cmd->mgmt_tx.hdr, msdu->data,
           min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

    res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
    if (res)
        goto err_unmap_msdu;

    return 0;

err_unmap_msdu:
    if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
    dev_kfree_skb_any(txdesc);
err_free_msdu_id:
    spin_lock_bh(&htt->tx_lock);
    ath10k_htt_tx_free_msdu_id(htt, msdu_id);
    spin_unlock_bh(&htt->tx_lock);
err:
    return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
    (unsigned int)(sizeof(struct htt_cmd_hdr) + \
    sizeof(struct htt_data_tx_desc) + \
    sizeof(struct ath10k_htc_hdr))

static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
                struct sk_buff *msdu)
{
    struct ath10k *ar = htt->ar;
    int res, data_len;
    struct htt_cmd_hdr *cmd_hdr;
    struct htt_data_tx_desc *tx_desc;
    struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
    struct sk_buff *tmp_skb;
    bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
    u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
    u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
    u8 flags0 = 0;
    u16 flags1 = 0;
    u16 msdu_id = 0;

    if (!is_eth) {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
            skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }
    }

    data_len = msdu->len;

    switch (txmode) {
    case ATH10K_HW_TXRX_RAW:
    case ATH10K_HW_TXRX_NATIVE_WIFI:
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
        fallthrough;
    case ATH10K_HW_TXRX_ETHERNET:
        flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
        break;
    case ATH10K_HW_TXRX_MGMT:
        flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

        if (htt->disable_tx_comp)
            flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
        break;
    }

    if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

    flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
    flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
    if (msdu->ip_summed == CHECKSUM_PARTIAL &&
        !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
    }

    /* Prepend the HTT header and TX desc struct to the data message
     * and realloc the skb if it does not have enough headroom.
     */
    if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
        tmp_skb = msdu;

        ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
               "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
               skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
        msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
        kfree_skb(tmp_skb);
        if (!msdu) {
            ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
            res = -ENOMEM;
            goto out;
        }
    }

    if (ar->bus_param.hl_msdu_ids) {
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        if (res < 0) {
            ath10k_err(ar, "msdu_id allocation failed %d\n", res);
            goto out;
        }
        msdu_id = res;
    }

    /* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
     * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
     * the reference count by one to avoid a use-after-free and a
     * double free.
     */
    skb_get(msdu);

    skb_push(msdu, sizeof(*cmd_hdr));
    skb_push(msdu, sizeof(*tx_desc));
    cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
    tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

    cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
    tx_desc->flags0 = flags0;
    tx_desc->flags1 = __cpu_to_le16(flags1);
    tx_desc->len = __cpu_to_le16(data_len);
    tx_desc->id = __cpu_to_le16(msdu_id);
    tx_desc->frags_paddr = 0; /* always zero */
    /* Initialize peer_id to INVALID_PEER because this is NOT
     * the reinjection path
     */
    tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

    res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
    return res;
}

1387 static int ath10k_htt_tx_32(struct ath10k_htt *htt,
1388                 enum ath10k_hw_txrx_mode txmode,
1389                 struct sk_buff *msdu)
1390 {
1391     struct ath10k *ar = htt->ar;
1392     struct device *dev = ar->dev;
1393     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
1394     struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1395     struct ath10k_hif_sg_item sg_items[2];
1396     struct ath10k_htt_txbuf_32 *txbuf;
1397     struct htt_data_tx_desc_frag *frags;
1398     bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1399     u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1400     u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1401     int prefetch_len;
1402     int res;
1403     u8 flags0 = 0;
1404     u16 msdu_id, flags1 = 0;
1405     u16 freq = 0;
1406     u32 frags_paddr = 0;
1407     u32 txbuf_paddr;
1408     struct htt_msdu_ext_desc *ext_desc = NULL;
1409     struct htt_msdu_ext_desc *ext_desc_t = NULL;
1410 
1411     res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1412     if (res < 0)
1413         goto err;
1414 
1415     msdu_id = res;
1416 
1417     prefetch_len = min(htt->prefetch_len, msdu->len);
1418     prefetch_len = roundup(prefetch_len, 4);
1419 
1420     txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
1421     txbuf_paddr = htt->txbuf.paddr +
1422               (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
1423 
1424     if (!is_eth) {
1425         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1426 
1427         if ((ieee80211_is_action(hdr->frame_control) ||
1428              ieee80211_is_deauth(hdr->frame_control) ||
1429              ieee80211_is_disassoc(hdr->frame_control)) &&
1430              ieee80211_has_protected(hdr->frame_control)) {
1431             skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1432         } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
1433                txmode == ATH10K_HW_TXRX_RAW &&
1434                ieee80211_has_protected(hdr->frame_control)) {
1435             skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1436         }
1437     }
1438 
1439     skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1440                        DMA_TO_DEVICE);
1441     res = dma_mapping_error(dev, skb_cb->paddr);
1442     if (res) {
1443         res = -EIO;
1444         goto err_free_msdu_id;
1445     }
1446 
1447     if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
1448         freq = ar->scan.roc_freq;
1449 
1450     switch (txmode) {
1451     case ATH10K_HW_TXRX_RAW:
1452     case ATH10K_HW_TXRX_NATIVE_WIFI:
1453         flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1454         fallthrough;
1455     case ATH10K_HW_TXRX_ETHERNET:
1456         if (ar->hw_params.continuous_frag_desc) {
1457             ext_desc_t = htt->frag_desc.vaddr_desc_32;
1458             memset(&ext_desc_t[msdu_id], 0,
1459                    sizeof(struct htt_msdu_ext_desc));
1460             frags = (struct htt_data_tx_desc_frag *)
1461                 &ext_desc_t[msdu_id].frags;
1462             ext_desc = &ext_desc_t[msdu_id];
1463             frags[0].tword_addr.paddr_lo =
1464                 __cpu_to_le32(skb_cb->paddr);
1465             frags[0].tword_addr.paddr_hi = 0;
1466             frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1467 
1468             frags_paddr =  htt->frag_desc.paddr +
1469                 (sizeof(struct htt_msdu_ext_desc) * msdu_id);
1470         } else {
1471             frags = txbuf->frags;
1472             frags[0].dword_addr.paddr =
1473                 __cpu_to_le32(skb_cb->paddr);
1474             frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
1475             frags[1].dword_addr.paddr = 0;
1476             frags[1].dword_addr.len = 0;
1477 
1478             frags_paddr = txbuf_paddr;
1479         }
1480         flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1481         break;
1482     case ATH10K_HW_TXRX_MGMT:
1483         flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1484                  HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1485         flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1486 
1487         frags_paddr = skb_cb->paddr;
1488         break;
1489     }
1490 
1491     /* Normally all commands go through HTC which manages tx credits for
1492      * each endpoint and notifies when tx is completed.
1493      *
1494      * HTT endpoint is creditless so there's no need to care about HTC
1495      * flags. In that case it is trivial to fill the HTC header here.
1496      *
1497      * MSDU transmission is considered completed upon HTT event. This
1498      * implies no relevant resources can be freed until after the event is
1499      * received. That's why HTC tx completion handler itself is ignored by
1500      * setting NULL to transfer_context for all sg items.
1501      *
1502      * There is simply no point in pushing HTT TX_FRM through HTC tx path
1503      * as it's a waste of resources. By bypassing HTC it is possible to
1504      * avoid extra memory allocations, compress data structures and thus
1505      * improve performance.
1506      */
1507 
1508     txbuf->htc_hdr.eid = htt->eid;
1509     txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
1510                        sizeof(txbuf->cmd_tx) +
1511                        prefetch_len);
1512     txbuf->htc_hdr.flags = 0;
1513 
1514     if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1515         flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1516 
1517     flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1518     flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
1519     if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1520         !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1521         flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1522         flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
1523         if (ar->hw_params.continuous_frag_desc)
1524             ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
1525     }
1526 
1527     /* Prevent firmware from sending up tx inspection requests. There's
1528      * nothing ath10k can do with frames requested for inspection so force
1529      * it to simply rely a regular tx completion with discard status.
1530      */
1531     flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1532 
1533     txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1534     txbuf->cmd_tx.flags0 = flags0;
1535     txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1536     txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1537     txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1538     txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
1539     if (ath10k_mac_tx_frm_has_freq(ar)) {
1540         txbuf->cmd_tx.offchan_tx.peerid =
1541                 __cpu_to_le16(HTT_INVALID_PEERID);
1542         txbuf->cmd_tx.offchan_tx.freq =
1543                 __cpu_to_le16(freq);
1544     } else {
1545         txbuf->cmd_tx.peerid =
1546                 __cpu_to_le32(HTT_INVALID_PEERID);
1547     }
1548 
1549     trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1550     ath10k_dbg(ar, ATH10K_DBG_HTT,
1551            "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
1552            flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1553            &skb_cb->paddr, vdev_id, tid, freq);
1554     ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1555             msdu->data, msdu->len);
1556     trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1557     trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1558 
1559     sg_items[0].transfer_id = 0;
1560     sg_items[0].transfer_context = NULL;
1561     sg_items[0].vaddr = &txbuf->htc_hdr;
1562     sg_items[0].paddr = txbuf_paddr +
1563                 sizeof(txbuf->frags);
1564     sg_items[0].len = sizeof(txbuf->htc_hdr) +
1565               sizeof(txbuf->cmd_hdr) +
1566               sizeof(txbuf->cmd_tx);
1567 
1568     sg_items[1].transfer_id = 0;
1569     sg_items[1].transfer_context = NULL;
1570     sg_items[1].vaddr = msdu->data;
1571     sg_items[1].paddr = skb_cb->paddr;
1572     sg_items[1].len = prefetch_len;
1573 
1574     res = ath10k_hif_tx_sg(htt->ar,
1575                    htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1576                    sg_items, ARRAY_SIZE(sg_items));
1577     if (res)
1578         goto err_unmap_msdu;
1579 
1580     return 0;
1581 
1582 err_unmap_msdu:
1583     dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1584 err_free_msdu_id:
1585     spin_lock_bh(&htt->tx_lock);
1586     ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1587     spin_unlock_bh(&htt->tx_lock);
1588 err:
1589     return res;
1590 }
1591 
1592 static int ath10k_htt_tx_64(struct ath10k_htt *htt,
1593                 enum ath10k_hw_txrx_mode txmode,
1594                 struct sk_buff *msdu)
1595 {
1596     struct ath10k *ar = htt->ar;
1597     struct device *dev = ar->dev;
1598     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
1599     struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1600     struct ath10k_hif_sg_item sg_items[2];
1601     struct ath10k_htt_txbuf_64 *txbuf;
1602     struct htt_data_tx_desc_frag *frags;
1603     bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1604     u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1605     u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1606     int prefetch_len;
1607     int res;
1608     u8 flags0 = 0;
1609     u16 msdu_id, flags1 = 0;
1610     u16 freq = 0;
1611     dma_addr_t frags_paddr = 0;
1612     dma_addr_t txbuf_paddr;
1613     struct htt_msdu_ext_desc_64 *ext_desc = NULL;
1614     struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
1615 
1616     res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1617     if (res < 0)
1618         goto err;
1619 
1620     msdu_id = res;
1621 
1622     prefetch_len = min(htt->prefetch_len, msdu->len);
1623     prefetch_len = roundup(prefetch_len, 4);
1624 
1625     txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
1626     txbuf_paddr = htt->txbuf.paddr +
1627               (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
1628 
1629     if (!is_eth) {
1630         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1631 
1632         if ((ieee80211_is_action(hdr->frame_control) ||
1633              ieee80211_is_deauth(hdr->frame_control) ||
1634              ieee80211_is_disassoc(hdr->frame_control)) &&
1635              ieee80211_has_protected(hdr->frame_control)) {
1636             skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1637         } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
1638                txmode == ATH10K_HW_TXRX_RAW &&
1639                ieee80211_has_protected(hdr->frame_control)) {
1640             skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1641         }
1642     }
1643 
1644     skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1645                        DMA_TO_DEVICE);
1646     res = dma_mapping_error(dev, skb_cb->paddr);
1647     if (res) {
1648         res = -EIO;
1649         goto err_free_msdu_id;
1650     }
1651 
1652     if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
1653         freq = ar->scan.roc_freq;
1654 
1655     switch (txmode) {
1656     case ATH10K_HW_TXRX_RAW:
1657     case ATH10K_HW_TXRX_NATIVE_WIFI:
1658         flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1659         fallthrough;
1660     case ATH10K_HW_TXRX_ETHERNET:
1661         if (ar->hw_params.continuous_frag_desc) {
1662             ext_desc_t = htt->frag_desc.vaddr_desc_64;
1663             memset(&ext_desc_t[msdu_id], 0,
1664                    sizeof(struct htt_msdu_ext_desc_64));
1665             frags = (struct htt_data_tx_desc_frag *)
1666                 &ext_desc_t[msdu_id].frags;
1667             ext_desc = &ext_desc_t[msdu_id];
1668             frags[0].tword_addr.paddr_lo =
1669                 __cpu_to_le32(skb_cb->paddr);
1670             frags[0].tword_addr.paddr_hi =
1671                 __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1672             frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1673 
1674             frags_paddr = htt->frag_desc.paddr +
1675                (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
1676         } else {
1677             frags = txbuf->frags;
1678             frags[0].tword_addr.paddr_lo =
1679                 __cpu_to_le32(skb_cb->paddr);
1680             frags[0].tword_addr.paddr_hi =
1681                 __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1682             frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1683             frags[1].tword_addr.paddr_lo = 0;
1684             frags[1].tword_addr.paddr_hi = 0;
1685             frags[1].tword_addr.len_16 = 0;
1686         }
1687         flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1688         break;
1689     case ATH10K_HW_TXRX_MGMT:
1690         flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1691                  HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1692         flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1693 
1694         frags_paddr = skb_cb->paddr;
1695         break;
1696     }
1697 
1698     /* Normally all commands go through HTC which manages tx credits for
1699      * each endpoint and notifies when tx is completed.
1700      *
1701      * The HTT endpoint is creditless, so HTC flags don't matter and the
1702      * HTC header can simply be filled in here.
1703      *
1704      * MSDU transmission is considered completed upon HTT event. This
1705      * implies no relevant resources can be freed until after the event is
1706      * received. That's why the HTC tx completion handler itself is ignored:
1707      * transfer_context is set to NULL for all sg items.
1708      *
1709      * There is simply no point in pushing HTT TX_FRM through the HTC tx
1710      * path as it's a waste of resources. By bypassing HTC it is possible to
1711      * avoid extra memory allocations, compress data structures and thus
1712      * improve performance.
1713      */
1714 
1715     txbuf->htc_hdr.eid = htt->eid;
1716     txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
1717                        sizeof(txbuf->cmd_tx) +
1718                        prefetch_len);
1719     txbuf->htc_hdr.flags = 0;
1720 
1721     if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1722         flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1723 
1724     flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1725     flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
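    /* CHECKSUM_PARTIAL means the stack left the L3/L4 checksums to be
     * filled in, so request hardware checksum offload (not possible in
     * raw mode, hence the exclusion).
     */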
1726     if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1727         !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1728         flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1729         flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
1730         if (ar->hw_params.continuous_frag_desc) {
1731             memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
1732             ext_desc->tso_flag[3] |=
1733                 __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
1734         }
1735     }
1736 
1737     /* Prevent firmware from sending up tx inspection requests. There's
1738      * nothing ath10k can do with frames requested for inspection, so force
1739      * it to simply rely on a regular tx completion with discard status.
1740      */
1741     flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1742 
1743     txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1744     txbuf->cmd_tx.flags0 = flags0;
1745     txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1746     txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1747     txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1748 
1749     /* fill fragment descriptor */
1750     txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
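    /* The 32-bit peerid field and the offchan_tx peerid/freq pair overlay
     * the same descriptor words, so exactly one of the two layouts is
     * written.
     */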
1751     if (ath10k_mac_tx_frm_has_freq(ar)) {
1752         txbuf->cmd_tx.offchan_tx.peerid =
1753                 __cpu_to_le16(HTT_INVALID_PEERID);
1754         txbuf->cmd_tx.offchan_tx.freq =
1755                 __cpu_to_le16(freq);
1756     } else {
1757         txbuf->cmd_tx.peerid =
1758                 __cpu_to_le32(HTT_INVALID_PEERID);
1759     }
1760 
1761     trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1762     ath10k_dbg(ar, ATH10K_DBG_HTT,
1763            "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
1764            flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1765            &skb_cb->paddr, vdev_id, tid, freq);
1766     ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1767             msdu->data, msdu->len);
1768     trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1769     trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1770 
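    /* Two SG items: the HTC/HTT headers taken from the txbuf (the paddr
     * is offset past the frags[] at its start), then the first
     * prefetch_len bytes of the frame itself.
     */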
1771     sg_items[0].transfer_id = 0;
1772     sg_items[0].transfer_context = NULL;
1773     sg_items[0].vaddr = &txbuf->htc_hdr;
1774     sg_items[0].paddr = txbuf_paddr +
1775                 sizeof(txbuf->frags);
1776     sg_items[0].len = sizeof(txbuf->htc_hdr) +
1777               sizeof(txbuf->cmd_hdr) +
1778               sizeof(txbuf->cmd_tx);
1779 
1780     sg_items[1].transfer_id = 0;
1781     sg_items[1].transfer_context = NULL;
1782     sg_items[1].vaddr = msdu->data;
1783     sg_items[1].paddr = skb_cb->paddr;
1784     sg_items[1].len = prefetch_len;
1785 
1786     res = ath10k_hif_tx_sg(htt->ar,
1787                    htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1788                    sg_items, ARRAY_SIZE(sg_items));
1789     if (res)
1790         goto err_unmap_msdu;
1791 
1792     return 0;
1793 
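    /* Unwind in reverse order: drop the DMA mapping, then return the
     * msdu_id under the tx lock.
     */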
1794 err_unmap_msdu:
1795     dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1796 err_free_msdu_id:
1797     spin_lock_bh(&htt->tx_lock);
1798     ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1799     spin_unlock_bh(&htt->tx_lock);
1800 err:
1801     return res;
1802 }
1803 
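/* Per-target op tables: 32-bit and 64-bit descriptor variants, plus the
 * high-latency path which keeps no host-resident tx buffers or frag
 * descriptors.
 */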
1804 static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
1805     .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
1806     .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1807     .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
1808     .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
1809     .htt_tx = ath10k_htt_tx_32,
1810     .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
1811     .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
1812     .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1813 };
1814 
1815 static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
1816     .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
1817     .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
1818     .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
1819     .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
1820     .htt_tx = ath10k_htt_tx_64,
1821     .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
1822     .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
1823     .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
1824 };
1825 
1826 static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
1827     .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
1828     .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1829     .htt_tx = ath10k_htt_tx_hl,
1830     .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1831     .htt_flush_tx = ath10k_htt_flush_tx_queue,
1832 };
1833 
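/* Bind the op table matching the target so callers can dispatch through
 * htt->tx_ops without caring about descriptor width or latency mode.
 */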
1834 void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
1835 {
1836     struct ath10k *ar = htt->ar;
1837 
1838     if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
1839         htt->tx_ops = &htt_tx_ops_hl;
1840     else if (ar->hw_params.target_64bit)
1841         htt->tx_ops = &htt_tx_ops_64;
1842     else
1843         htt->tx_ops = &htt_tx_ops_32;
1844 }