// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
                                          struct sk_buff *skb)
{
    dev_kfree_skb_any(skb);
}

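/* Tear down per-peer data path state. The peer lookup, the rx TID
 * cleanup and the release of the peer's MIC shash (tfm_mmic, set up
 * during rx defrag init) all happen under base_lock so the peer cannot
 * be deleted while its queues are being freed.
 */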
void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
    struct ath11k_base *ab = ar->ab;
    struct ath11k_peer *peer;

    /* TODO: Any other peer specific DP cleanup */

    spin_lock_bh(&ab->base_lock);
    peer = ath11k_peer_find(ab, vdev_id, addr);
    if (!peer) {
        ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
                    addr, vdev_id);
        spin_unlock_bh(&ab->base_lock);
        return;
    }

    ath11k_peer_rx_tid_cleanup(ar, peer);
    crypto_free_shash(peer->tfm_mmic);
    spin_unlock_bh(&ab->base_lock);
}

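/* Set up per-peer data path state: program the peer's default REO
 * routing, create an rx TID queue for every TID from 0 through
 * IEEE80211_NUM_TIDS, and initialize the rx defrag context. The routing
 * value packs the hash-enable bit together with the REO destination
 * ring id shifted left by one; e.g. for the pdev with mac_id 0,
 * reo_dest is 1 and the value sent is DP_RX_HASH_ENABLE | (1 << 1).
 * On a TID setup failure, the queues created so far are deleted via
 * the peer_clean label.
 */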
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
    struct ath11k_base *ab = ar->ab;
    struct ath11k_peer *peer;
    u32 reo_dest;
    int ret = 0, tid;

    /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
    reo_dest = ar->dp.mac_id + 1;
    ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
                                    WMI_PEER_SET_DEFAULT_ROUTING,
                                    DP_RX_HASH_ENABLE | (reo_dest << 1));

    if (ret) {
        ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
                    ret, addr, vdev_id);
        return ret;
    }

    for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
        ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
                                       HAL_PN_TYPE_NONE);
        if (ret) {
            ath11k_warn(ab, "failed to setup rx tid queue for tid %d: %d\n",
                        tid, ret);
            goto peer_clean;
        }
    }

    ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
    if (ret) {
        ath11k_warn(ab, "failed to setup rx defrag context\n");
        return ret;
    }

    /* TODO: Setup other peer specific resource used in data path */

    return 0;

peer_clean:
    spin_lock_bh(&ab->base_lock);

    peer = ath11k_peer_find(ab, vdev_id, addr);
    if (!peer) {
        ath11k_warn(ab, "failed to find the peer to del rx tid\n");
        spin_unlock_bh(&ab->base_lock);
        return -ENOENT;
    }

    for (; tid >= 0; tid--)
        ath11k_peer_rx_tid_delete(ar, peer, tid);

    spin_unlock_bh(&ab->base_lock);

    return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
    if (!ring->vaddr_unaligned)
        return;

    if (ring->cached)
        kfree(ring->vaddr_unaligned);
    else
        dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
                          ring->paddr_unaligned);

    ring->vaddr_unaligned = NULL;
}

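/* Each ext interrupt group services the rings whose bits are set in its
 * per-group mask. Given a ring number, return the index of the group
 * that owns it, or -ENOENT if no group has the corresponding bit set.
 */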
static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
    int ext_group_num;
    u8 mask = 1 << ring_num;

    for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
         ext_group_num++) {
        if (mask & grp_mask[ext_group_num])
            return ext_group_num;
    }

    return -ENOENT;
}

static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
                                              enum hal_ring_type type, int ring_num)
{
    const u8 *grp_mask;

    switch (type) {
    case HAL_WBM2SW_RELEASE:
        if (ring_num < 3) {
            grp_mask = &ab->hw_params.ring_mask->tx[0];
        } else if (ring_num == 3) {
            grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
            ring_num = 0;
        } else {
            return -ENOENT;
        }
        break;
    case HAL_REO_EXCEPTION:
        grp_mask = &ab->hw_params.ring_mask->rx_err[0];
        break;
    case HAL_REO_DST:
        grp_mask = &ab->hw_params.ring_mask->rx[0];
        break;
    case HAL_REO_STATUS:
        grp_mask = &ab->hw_params.ring_mask->reo_status[0];
        break;
    case HAL_RXDMA_MONITOR_STATUS:
    case HAL_RXDMA_MONITOR_DST:
        grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
        break;
    case HAL_RXDMA_DST:
        grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
        break;
    case HAL_RXDMA_BUF:
        grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
        break;
    case HAL_RXDMA_MONITOR_BUF:
    case HAL_TCL_DATA:
    case HAL_TCL_CMD:
    case HAL_REO_CMD:
    case HAL_SW2WBM_RELEASE:
    case HAL_WBM_IDLE_LINK:
    case HAL_TCL_STATUS:
    case HAL_REO_REINJECT:
    case HAL_CE_SRC:
    case HAL_CE_DST:
    case HAL_CE_DST_STATUS:
    default:
        return -ENOENT;
    }

    return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

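/* Fill in the MSI address/data for a ring. Rings that do not belong to
 * an ext interrupt group get a zeroed MSI address and no MSI interrupt.
 * When there are more groups than MSI vectors, several groups share one
 * vector via (msi_group_number % msi_data_count).
 */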
static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
                                     struct hal_srng_params *ring_params,
                                     enum hal_ring_type type, int ring_num)
{
    int msi_group_number, msi_data_count;
    u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
    int ret;

    ret = ath11k_get_user_msi_vector(ab, "DP",
                                     &msi_data_count, &msi_data_start,
                                     &msi_irq_start);
    if (ret)
        return;

    msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
                                                          ring_num);
    if (msi_group_number < 0) {
        ath11k_dbg(ab, ATH11K_DBG_PCI,
                   "ring not part of an ext_group; ring_type: %d, ring_num: %d",
                   type, ring_num);
        ring_params->msi_addr = 0;
        ring_params->msi_data = 0;
        return;
    }

    if (msi_group_number > msi_data_count) {
        ath11k_dbg(ab, ATH11K_DBG_PCI,
                   "multiple msi_groups share one msi, msi_group_num %d",
                   msi_group_number);
    }

    ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

    ring_params->msi_addr = addr_lo;
    ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
    ring_params->msi_data = (msi_group_number % msi_data_count)
                            + msi_data_start;
    ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

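/* Allocate and initialize an SRNG. The backing memory is over-allocated
 * by HAL_RING_BASE_ALIGN - 1 bytes so that the ring base can be aligned
 * with PTR_ALIGN(); both the aligned and unaligned vaddr/paddr pairs are
 * kept so the original allocation can be freed later. On hw that supports
 * it, REO destination and tx completion rings come from cacheable memory.
 */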
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
                         enum hal_ring_type type, int ring_num,
                         int mac_id, int num_entries)
{
    struct hal_srng_params params = { 0 };
    int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
    int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
    int ret;
    bool cached = false;

    if (max_entries < 0 || entry_sz < 0)
        return -EINVAL;

    if (num_entries > max_entries)
        num_entries = max_entries;

    ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

    if (ab->hw_params.alloc_cacheable_memory) {
        /* Allocate the reo dst and tx completion rings from cacheable memory */
        switch (type) {
        case HAL_REO_DST:
        case HAL_WBM2SW_RELEASE:
            cached = true;
            break;
        default:
            cached = false;
        }

        if (cached) {
            ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
            ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
        }
    }

    if (!cached)
        ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
                                                   &ring->paddr_unaligned,
                                                   GFP_KERNEL);

    if (!ring->vaddr_unaligned)
        return -ENOMEM;

    ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
    ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
                  (unsigned long)ring->vaddr_unaligned);

    params.ring_base_vaddr = ring->vaddr;
    params.ring_base_paddr = ring->paddr;
    params.num_entries = num_entries;
    ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

    switch (type) {
    case HAL_REO_DST:
        params.intr_batch_cntr_thres_entries =
                    HAL_SRNG_INT_BATCH_THRESHOLD_RX;
        params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
        break;
    case HAL_RXDMA_BUF:
    case HAL_RXDMA_MONITOR_BUF:
    case HAL_RXDMA_MONITOR_STATUS:
        params.low_threshold = num_entries >> 3;
        params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
        params.intr_batch_cntr_thres_entries = 0;
        params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
        break;
    case HAL_WBM2SW_RELEASE:
        if (ring_num < 3) {
            params.intr_batch_cntr_thres_entries =
                    HAL_SRNG_INT_BATCH_THRESHOLD_TX;
            params.intr_timer_thres_us =
                    HAL_SRNG_INT_TIMER_THRESHOLD_TX;
            break;
        }
        /* fall through when ring_num >= 3 */
        fallthrough;
    case HAL_REO_EXCEPTION:
    case HAL_REO_REINJECT:
    case HAL_REO_CMD:
    case HAL_REO_STATUS:
    case HAL_TCL_DATA:
    case HAL_TCL_CMD:
    case HAL_TCL_STATUS:
    case HAL_WBM_IDLE_LINK:
    case HAL_SW2WBM_RELEASE:
    case HAL_RXDMA_DST:
    case HAL_RXDMA_MONITOR_DST:
    case HAL_RXDMA_MONITOR_DESC:
        params.intr_batch_cntr_thres_entries =
                    HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
        params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
        break;
    case HAL_RXDMA_DIR_BUF:
        break;
    default:
        ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
        return -EINVAL;
    }

    if (cached) {
        params.flags |= HAL_SRNG_FLAGS_CACHED;
        ring->cached = 1;
    }

    ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
    if (ret < 0) {
        ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
                    ret, ring_num);
        return ret;
    }

    ring->ring_id = ret;

    return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
    int i;

    if (!ab->hw_params.supports_shadow_regs)
        return;

    for (i = 0; i < ab->hw_params.max_tx_ring; i++)
        ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

    ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
    struct ath11k_dp *dp = &ab->dp;
    int i;

    ath11k_dp_stop_shadow_timers(ab);
    ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
    ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
    ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
    for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
        ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
        ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
    }
    ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
    ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
    ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
    ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
    ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
    struct ath11k_dp *dp = &ab->dp;
    struct hal_srng *srng;
    int i, ret;

    ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
                               HAL_SW2WBM_RELEASE, 0, 0,
                               DP_WBM_RELEASE_RING_SIZE);
    if (ret) {
        ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
                    ret);
        goto err;
    }

    ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
                               DP_TCL_CMD_RING_SIZE);
    if (ret) {
        ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
        goto err;
    }

    ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
                               0, 0, DP_TCL_STATUS_RING_SIZE);
    if (ret) {
        ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
        goto err;
    }

    for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
        ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
                                   HAL_TCL_DATA, i, 0,
                                   DP_TCL_DATA_RING_SIZE);
        if (ret) {
            ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
                        i, ret);
            goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
                                   HAL_WBM2SW_RELEASE, i, 0,
                                   DP_TX_COMP_RING_SIZE);
        if (ret) {
            ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
                        i, ret);
            goto err;
        }

        srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
        ath11k_hal_tx_init_data_ring(ab, srng);

        ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
                                    ATH11K_SHADOW_DP_TIMER_INTERVAL,
                                    dp->tx_ring[i].tcl_data_ring.ring_id);
    }

    ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
                               0, 0, DP_REO_REINJECT_RING_SIZE);
    if (ret) {
        ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
                    ret);
        goto err;
    }

    ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
                               3, 0, DP_RX_RELEASE_RING_SIZE);
    if (ret) {
        ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
        goto err;
    }

    ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
                               0, 0, DP_REO_EXCEPTION_RING_SIZE);
    if (ret) {
        ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
                    ret);
        goto err;
    }

    ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
                               0, 0, DP_REO_CMD_RING_SIZE);
    if (ret) {
        ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
        goto err;
    }

    srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
    ath11k_hal_reo_init_cmd_ring(ab, srng);

    ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
                                ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
                                dp->reo_cmd_ring.ring_id);

    ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
                               0, 0, DP_REO_STATUS_RING_SIZE);
    if (ret) {
        ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
        goto err;
    }

    /* When hash-based routing of rx packets is enabled, 32 entries
     * mapping the hash values to the REO destination rings are
     * configured here.
     */
    ab->hw_params.hw_ops->reo_setup(ab);

    return 0;

err:
    ath11k_dp_srng_common_cleanup(ab);

    return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
    struct ath11k_dp *dp = &ab->dp;
    struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
    int i;

    for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
        if (!slist[i].vaddr)
            continue;

        dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                          slist[i].vaddr, slist[i].paddr);
        slist[i].vaddr = NULL;
    }
}

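/* When the idle link descriptor list does not fit in one allocation,
 * spread the descriptors across up to DP_IDLE_SCATTER_BUFS_MAX
 * DMA-coherent scatter buffers and hand the buffer list to the HAL,
 * which points the WBM idle list at them.
 */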
static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
                                                  int size,
                                                  u32 n_link_desc_bank,
                                                  u32 n_link_desc,
                                                  u32 last_bank_sz)
{
    struct ath11k_dp *dp = &ab->dp;
    struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
    struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
    u32 n_entries_per_buf;
    int num_scatter_buf, scatter_idx;
    struct hal_wbm_link_desc *scatter_buf;
    int align_bytes, n_entries;
    dma_addr_t paddr;
    int rem_entries;
    int i;
    int ret = 0;
    u32 end_offset;

    n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
        ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
    num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

    if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
        return -EINVAL;

    for (i = 0; i < num_scatter_buf; i++) {
        slist[i].vaddr = dma_alloc_coherent(ab->dev,
                                            HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                                            &slist[i].paddr, GFP_KERNEL);
        if (!slist[i].vaddr) {
            ret = -ENOMEM;
            goto err;
        }
    }

    scatter_idx = 0;
    scatter_buf = slist[scatter_idx].vaddr;
    rem_entries = n_entries_per_buf;

    for (i = 0; i < n_link_desc_bank; i++) {
        align_bytes = link_desc_banks[i].vaddr -
                      link_desc_banks[i].vaddr_unaligned;
        n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
                    HAL_LINK_DESC_SIZE;
        paddr = link_desc_banks[i].paddr;
        while (n_entries) {
            ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
            n_entries--;
            paddr += HAL_LINK_DESC_SIZE;
            if (rem_entries) {
                rem_entries--;
                scatter_buf++;
                continue;
            }

            rem_entries = n_entries_per_buf;
            scatter_idx++;
            scatter_buf = slist[scatter_idx].vaddr;
        }
    }

    end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
                 sizeof(struct hal_wbm_link_desc);
    ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
                                    n_link_desc, end_offset);

    return 0;

err:
    ath11k_dp_scatter_idle_link_desc_cleanup(ab);

    return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks)
{
    int i;

    for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
        if (link_desc_banks[i].vaddr_unaligned) {
            dma_free_coherent(ab->dev,
                              link_desc_banks[i].size,
                              link_desc_banks[i].vaddr_unaligned,
                              link_desc_banks[i].paddr_unaligned);
            link_desc_banks[i].vaddr_unaligned = NULL;
        }
    }
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
                                          struct dp_link_desc_bank *desc_bank,
                                          int n_link_desc_bank,
                                          int last_bank_sz)
{
    struct ath11k_dp *dp = &ab->dp;
    int i;
    int ret = 0;
    int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

    for (i = 0; i < n_link_desc_bank; i++) {
        if (i == (n_link_desc_bank - 1) && last_bank_sz)
            desc_sz = last_bank_sz;

        desc_bank[i].vaddr_unaligned =
                    dma_alloc_coherent(ab->dev, desc_sz,
                                       &desc_bank[i].paddr_unaligned,
                                       GFP_KERNEL);
        if (!desc_bank[i].vaddr_unaligned) {
            ret = -ENOMEM;
            goto err;
        }

        desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
                                       HAL_LINK_DESC_ALIGN);
        desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
                             ((unsigned long)desc_bank[i].vaddr -
                              (unsigned long)desc_bank[i].vaddr_unaligned);
        desc_bank[i].size = desc_sz;
    }

    return 0;

err:
    ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

    return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
                                 struct dp_link_desc_bank *desc_bank,
                                 u32 ring_type, struct dp_srng *ring)
{
    ath11k_dp_link_desc_bank_free(ab, desc_bank);

    if (ring_type != HAL_RXDMA_MONITOR_DESC) {
        ath11k_dp_srng_cleanup(ab, ring);
        ath11k_dp_scatter_idle_link_desc_cleanup(ab);
    }
}

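/* Size the WBM idle link ring from worst-case descriptor counts for
 * MPDU links, MPDU queues and tx/rx MSDU links, rounding the total up
 * to the next power of two (1 << fls(n) when n is not already one)
 * before creating the ring.
 */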
static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
    struct ath11k_dp *dp = &ab->dp;
    u32 n_mpdu_link_desc, n_mpdu_queue_desc;
    u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
    int ret = 0;

    n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
                       HAL_NUM_MPDUS_PER_LINK_DESC;

    n_mpdu_queue_desc = n_mpdu_link_desc /
                        HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

    n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
                           DP_AVG_MSDUS_PER_FLOW) /
                          HAL_NUM_TX_MSDUS_PER_LINK_DESC;

    n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
                           DP_AVG_MSDUS_PER_MPDU) /
                          HAL_NUM_RX_MSDUS_PER_LINK_DESC;

    *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
                   n_tx_msdu_link_desc + n_rx_msdu_link_desc;

    if (*n_link_desc & (*n_link_desc - 1))
        *n_link_desc = 1 << fls(*n_link_desc);

    ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
                               HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
    if (ret) {
        ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
        return ret;
    }
    return ret;
}

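/* Allocate the link descriptor banks and expose every descriptor to the
 * hw: either through the scatter list above when the total size exceeds
 * DP_LINK_DESC_ALLOC_SIZE_THRESH, or by writing each descriptor address
 * straight into the WBM idle link ring.
 */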
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks,
                              u32 ring_type, struct hal_srng *srng,
                              u32 n_link_desc)
{
    u32 tot_mem_sz;
    u32 n_link_desc_bank, last_bank_sz;
    u32 entry_sz, align_bytes, n_entries;
    u32 paddr;
    u32 *desc;
    int i, ret;

    tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
    tot_mem_sz += HAL_LINK_DESC_ALIGN;

    if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
        n_link_desc_bank = 1;
        last_bank_sz = tot_mem_sz;
    } else {
        n_link_desc_bank = tot_mem_sz /
                           (DP_LINK_DESC_ALLOC_SIZE_THRESH -
                            HAL_LINK_DESC_ALIGN);
        last_bank_sz = tot_mem_sz %
                       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
                        HAL_LINK_DESC_ALIGN);

        if (last_bank_sz)
            n_link_desc_bank += 1;
    }

    if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
        return -EINVAL;

    ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
                                         n_link_desc_bank, last_bank_sz);
    if (ret)
        return ret;

    /* Setup link desc idle list for HW internal usage */
    entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
    tot_mem_sz = entry_sz * n_link_desc;

    /* Set up the scatter desc list when the total memory requirement
     * exceeds what a single link desc bank can hold.
     */
    if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
        ring_type != HAL_RXDMA_MONITOR_DESC) {
        ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
                                                     n_link_desc_bank,
                                                     n_link_desc,
                                                     last_bank_sz);
        if (ret) {
            ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
                        ret);
            goto fail_desc_bank_free;
        }

        return 0;
    }

    spin_lock_bh(&srng->lock);

    ath11k_hal_srng_access_begin(ab, srng);

    for (i = 0; i < n_link_desc_bank; i++) {
        align_bytes = link_desc_banks[i].vaddr -
                      link_desc_banks[i].vaddr_unaligned;
        n_entries = (link_desc_banks[i].size - align_bytes) /
                    HAL_LINK_DESC_SIZE;
        paddr = link_desc_banks[i].paddr;
        while (n_entries &&
               (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
            ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
                                          i, paddr);
            n_entries--;
            paddr += HAL_LINK_DESC_SIZE;
        }
    }

    ath11k_hal_srng_access_end(ab, srng);

    spin_unlock_bh(&srng->lock);

    return 0;

fail_desc_bank_free:
    ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

    return ret;
}

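/* NAPI poll handler for one ext interrupt group: service every ring that
 * the group's masks assign to it, charging rx work against the NAPI
 * budget and stopping early once the budget is exhausted. Tx completions
 * and REO status processing are not budgeted. Returns the total work
 * done.
 */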
int ath11k_dp_service_srng(struct ath11k_base *ab,
                           struct ath11k_ext_irq_grp *irq_grp,
                           int budget)
{
    struct napi_struct *napi = &irq_grp->napi;
    const struct ath11k_hw_hal_params *hal_params;
    int grp_id = irq_grp->grp_id;
    int work_done = 0;
    int i, j;
    int tot_work_done = 0;

    if (ab->hw_params.ring_mask->tx[grp_id]) {
        i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
        ath11k_dp_tx_completion_handler(ab, i);
    }

    if (ab->hw_params.ring_mask->rx_err[grp_id]) {
        work_done = ath11k_dp_process_rx_err(ab, napi, budget);
        budget -= work_done;
        tot_work_done += work_done;
        if (budget <= 0)
            goto done;
    }

    if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
        work_done = ath11k_dp_rx_process_wbm_err(ab,
                                                 napi,
                                                 budget);
        budget -= work_done;
        tot_work_done += work_done;

        if (budget <= 0)
            goto done;
    }

    if (ab->hw_params.ring_mask->rx[grp_id]) {
        i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
        work_done = ath11k_dp_process_rx(ab, i, napi,
                                         budget);
        budget -= work_done;
        tot_work_done += work_done;
        if (budget <= 0)
            goto done;
    }

    if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
        for (i = 0; i < ab->num_radios; i++) {
            for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
                int id = i * ab->hw_params.num_rxmda_per_pdev + j;

                if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
                    BIT(id)) {
                    work_done =
                    ath11k_dp_rx_process_mon_rings(ab,
                                                   id,
                                                   napi, budget);
                    budget -= work_done;
                    tot_work_done += work_done;

                    if (budget <= 0)
                        goto done;
                }
            }
        }
    }

    if (ab->hw_params.ring_mask->reo_status[grp_id])
        ath11k_dp_process_reo_status(ab);

    for (i = 0; i < ab->num_radios; i++) {
        for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
            int id = i * ab->hw_params.num_rxmda_per_pdev + j;

            if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
                work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
                budget -= work_done;
                tot_work_done += work_done;
            }

            if (budget <= 0)
                goto done;

            if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
                struct ath11k *ar = ath11k_ab_to_ar(ab, id);
                struct ath11k_pdev_dp *dp = &ar->dp;
                struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

                hal_params = ab->hw_params.hal_params;
                ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
                                           hal_params->rx_buf_rbm);
            }
        }
    }
    /* TODO: Implement handler for other interrupts */

done:
    return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
    struct ath11k *ar;
    int i;

    del_timer_sync(&ab->mon_reap_timer);

    for (i = 0; i < ab->num_radios; i++) {
        ar = ab->pdevs[i].ar;
        ath11k_dp_rx_pdev_free(ab, i);
        ath11k_debugfs_unregister(ar);
        ath11k_dp_rx_pdev_mon_detach(ar);
    }
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
    struct ath11k *ar;
    struct ath11k_pdev_dp *dp;
    int i;
    int j;

    for (i = 0; i < ab->num_radios; i++) {
        ar = ab->pdevs[i].ar;
        dp = &ar->dp;
        dp->mac_id = i;
        idr_init(&dp->rx_refill_buf_ring.bufs_idr);
        spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
        atomic_set(&dp->num_tx_pending, 0);
        init_waitqueue_head(&dp->tx_empty_waitq);
        for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
            idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
            spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
        }
        idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
        spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
    }
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
    struct ath11k *ar;
    int ret;
    int i;

    /* TODO: Per-pdev rx ring, unlike the tx rings which are mapped to different ACs */
    for (i = 0; i < ab->num_radios; i++) {
        ar = ab->pdevs[i].ar;
        ret = ath11k_dp_rx_pdev_alloc(ab, i);
        if (ret) {
            ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
                        i);
            goto err;
        }
        ret = ath11k_dp_rx_pdev_mon_attach(ar);
        if (ret) {
            ath11k_warn(ab, "failed to initialize mon pdev %d\n",
                        i);
            goto err;
        }
    }

    return 0;

err:
    ath11k_dp_pdev_free(ab);

    return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
    struct ath11k_htc_svc_conn_req conn_req;
    struct ath11k_htc_svc_conn_resp conn_resp;
    int status;

    memset(&conn_req, 0, sizeof(conn_req));
    memset(&conn_resp, 0, sizeof(conn_resp));

    conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
    conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

    /* connect to control service */
    conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

    status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
                                        &conn_resp);

    if (status)
        return status;

    dp->eid = conn_resp.eid;

    return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
    /* When v2_map_support is true: for STA mode, enable address
     * search index; TCL uses the ast_hash value in the descriptor.
     * When v2_map_support is false: for STA mode, don't enable
     * address search index.
     */
    switch (arvif->vdev_type) {
    case WMI_VDEV_TYPE_STA:
        if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
            arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
            arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
        } else {
            arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
            arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
        }
        break;
    case WMI_VDEV_TYPE_AP:
    case WMI_VDEV_TYPE_IBSS:
        arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
        arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
        break;
    case WMI_VDEV_TYPE_MONITOR:
    default:
        return;
    }
}

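/* Precompute the TCL metadata for a vif so the tx path can stamp it into
 * descriptors without reassembling it per frame: the metadata type field
 * set to 1, the vdev and pdev ids, and the HTT extension valid bit
 * cleared by default.
 */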
void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
    arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
                           FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
                                      arvif->vdev_id) |
                           FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
                                      ar->pdev->pdev_id);

    /* set HTT extension valid bit to 0 by default */
    arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

    ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
    struct ath11k_base *ab = (struct ath11k_base *)ctx;
    struct sk_buff *msdu = skb;

    dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
                     DMA_TO_DEVICE);

    dev_kfree_skb_any(msdu);

    return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
    struct ath11k_dp *dp = &ab->dp;
    int i;

    ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
                                HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

    ath11k_dp_srng_common_cleanup(ab);

    ath11k_dp_reo_cmd_list_cleanup(ab);

    for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
        spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
        idr_for_each(&dp->tx_ring[i].txbuf_idr,
                     ath11k_dp_tx_pending_cleanup, ab);
        idr_destroy(&dp->tx_ring[i].txbuf_idr);
        spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
        kfree(dp->tx_ring[i].tx_status);
    }

    /* Deinit any SOC level resource */
}

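/* SoC-level data path init: create the WBM idle ring and the link
 * descriptor banks, set up the common SRNGs, allocate per-tx-ring
 * bookkeeping (txbuf idr plus a tx_status ring of DP_TX_COMP_RING_SIZE
 * entries) and program the DSCP-to-TID map tables.
 */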
int ath11k_dp_alloc(struct ath11k_base *ab)
{
    struct ath11k_dp *dp = &ab->dp;
    struct hal_srng *srng = NULL;
    size_t size = 0;
    u32 n_link_desc = 0;
    int ret;
    int i;

    dp->ab = ab;

    INIT_LIST_HEAD(&dp->reo_cmd_list);
    INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
    INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
    spin_lock_init(&dp->reo_cmd_lock);

    dp->reo_cmd_cache_flush_count = 0;

    ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
    if (ret) {
        ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
        return ret;
    }

    srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

    ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
                                    HAL_WBM_IDLE_LINK, srng, n_link_desc);
    if (ret) {
        ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
        return ret;
    }

    ret = ath11k_dp_srng_common_setup(ab);
    if (ret)
        goto fail_link_desc_cleanup;

    size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

    for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
        idr_init(&dp->tx_ring[i].txbuf_idr);
        spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
        dp->tx_ring[i].tcl_data_ring_id = i;

        dp->tx_ring[i].tx_status_head = 0;
        dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
        dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
        if (!dp->tx_ring[i].tx_status) {
            ret = -ENOMEM;
            goto fail_cmn_srng_cleanup;
        }
    }

    for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
        ath11k_hal_tx_set_dscp_tid_map(ab, i);

    /* Init any SOC level resource for DP */

    return 0;

fail_cmn_srng_cleanup:
    ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
    ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
                                HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

    return ret;
}

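/* Shadow register head pointer update timer: on targets with shadow
 * registers, hw head pointer updates for a ring are flushed by this
 * timer once the ring has seen no new tx for a full interval.
 */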
static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
    struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
                                                             t, timer);
    struct ath11k_base *ab = update_timer->ab;
    struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

    spin_lock_bh(&srng->lock);

    /* When the timer fires, the handler checks whether new TX has
     * happened since the last run. It updates the HP only when there
     * were no TX operations during the timeout interval, and then stops
     * the timer. The timer is restarted when TX happens again.
     */
    if (update_timer->timer_tx_num != update_timer->tx_num) {
        update_timer->timer_tx_num = update_timer->tx_num;
        mod_timer(&update_timer->timer, jiffies +
                  msecs_to_jiffies(update_timer->interval));
    } else {
        update_timer->started = false;
        ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
    }

    spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
                                  struct hal_srng *srng,
                                  struct ath11k_hp_update_timer *update_timer)
{
    lockdep_assert_held(&srng->lock);

    if (!ab->hw_params.supports_shadow_regs)
        return;

    update_timer->tx_num++;

    if (update_timer->started)
        return;

    update_timer->started = true;
    update_timer->timer_tx_num = update_timer->tx_num;
    mod_timer(&update_timer->timer, jiffies +
              msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
                                 struct ath11k_hp_update_timer *update_timer)
{
    if (!ab->hw_params.supports_shadow_regs)
        return;

    if (!update_timer->init)
        return;

    del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
                                 struct ath11k_hp_update_timer *update_timer,
                                 u32 interval, u32 ring_id)
{
    if (!ab->hw_params.supports_shadow_regs)
        return;

    update_timer->tx_num = 0;
    update_timer->timer_tx_num = 0;
    update_timer->ab = ab;
    update_timer->ring_id = ring_id;
    update_timer->interval = interval;
    update_timer->init = true;
    timer_setup(&update_timer->timer,
                ath11k_dp_shadow_timer_handler, 0);
}