Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: BSD-3-Clause-Clear
0002 /*
0003  * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
0004  */
0005 
0006 #include "core.h"
0007 #include "debug.h"
0008 
0009 #define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
0010 
0011 int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
0012 {
0013     u32 *temp;
0014     int idx;
0015 
0016     size = size >> 2;
0017 
0018     for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
0019         if (*temp == ATH11K_DB_MAGIC_VALUE)
0020             return -EINVAL;
0021     }
0022 
0023     return 0;
0024 }
0025 
0026 static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
0027                        void *buffer, u32 size)
0028 {
0029     u32 *temp;
0030     int idx;
0031 
0032     size = size >> 2;
0033 
0034     for (idx = 0, temp = buffer; idx < size; idx++, temp++)
0035         *temp++ = ATH11K_DB_MAGIC_VALUE;
0036 }
0037 
/* Hand one buffer element to the hardware via the refill SRNG.
 *
 * The payload is aligned to ring->buf_align, pre-filled with the magic
 * pattern (so unwritten words can be detected later), DMA-mapped for
 * device writes, and published in the ring together with a cookie that
 * encodes the pdev index and the IDR-allocated buffer id.
 *
 * Caller must hold srng->lock (asserted below). Returns 0 on success or
 * a negative errno; on failure the IDR slot and DMA mapping acquired so
 * far are rolled back, while @buff itself remains owned by the caller.
 */
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
					struct ath11k_dbring *ring,
					struct ath11k_dbring_element *buff,
					enum wmi_direct_buffer_module id)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	/* payload was allocated with buf_align - 1 extra bytes (see
	 * ath11k_dbring_fill_bufs()), so the aligned pointer still has
	 * buf_sz usable bytes behind it.
	 */
	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	/* buf_id doubles as the lookup key when the device releases the
	 * buffer back to us (ath11k_dbring_buffer_release_event()).
	 */
	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	/* access_end must be called even on error to close the SRNG
	 * transaction opened above.
	 */
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}
0104 
/* Fill the refill ring with as many buffer elements as it can take.
 *
 * One ath11k_dbring_element is allocated per free ring slot; the
 * payload is oversized by buf_align - 1 bytes so it can be aligned in
 * ath11k_dbring_bufs_replenish(). An allocation failure or a replenish
 * error stops the loop early.
 *
 * Allocations use GFP_ATOMIC because srng->lock (a BH spinlock) is held
 * across the loop, as required by the replenish helper.
 *
 * Returns the number of requested entries that were NOT filled
 * (0 means the ring was fully replenished).
 */
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring,
				   enum wmi_direct_buffer_module id)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
		if (!buff)
			break;

		buff->payload = kzalloc(size, GFP_ATOMIC);
		if (!buff->payload) {
			kfree(buff);
			break;
		}
		ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff->payload);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}
0150 
0151 int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
0152                 struct ath11k_dbring *ring,
0153                 enum wmi_direct_buffer_module id)
0154 {
0155     struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
0156     int ret;
0157 
0158     if (id >= WMI_DIRECT_BUF_MAX)
0159         return -EINVAL;
0160 
0161     param.pdev_id       = DP_SW2HW_MACID(ring->pdev_id);
0162     param.module_id     = id;
0163     param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
0164     param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
0165     param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
0166     param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
0167     param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
0168     param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
0169     param.num_elems     = ring->bufs_max;
0170     param.buf_size      = ring->buf_sz;
0171     param.num_resp_per_event = ring->num_resp_per_event;
0172     param.event_timeout_ms  = ring->event_timeout_ms;
0173 
0174     ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
0175     if (ret) {
0176         ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
0177         return ret;
0178     }
0179 
0180     return 0;
0181 }
0182 
0183 int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
0184               u32 num_resp_per_event, u32 event_timeout_ms,
0185               int (*handler)(struct ath11k *,
0186                      struct ath11k_dbring_data *))
0187 {
0188     if (WARN_ON(!ring))
0189         return -EINVAL;
0190 
0191     ring->num_resp_per_event = num_resp_per_event;
0192     ring->event_timeout_ms = event_timeout_ms;
0193     ring->handler = handler;
0194 
0195     return 0;
0196 }
0197 
0198 int ath11k_dbring_buf_setup(struct ath11k *ar,
0199                 struct ath11k_dbring *ring,
0200                 struct ath11k_dbring_cap *db_cap)
0201 {
0202     struct ath11k_base *ab = ar->ab;
0203     struct hal_srng *srng;
0204     int ret;
0205 
0206     srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
0207     ring->bufs_max = ring->refill_srng.size /
0208         ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
0209 
0210     ring->buf_sz = db_cap->min_buf_sz;
0211     ring->buf_align = db_cap->min_buf_align;
0212     ring->pdev_id = db_cap->pdev_id;
0213     ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
0214     ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
0215 
0216     ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
0217 
0218     return ret;
0219 }
0220 
0221 int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
0222                  int ring_num, int num_entries)
0223 {
0224     int ret;
0225 
0226     ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
0227                    ring_num, ar->pdev_idx, num_entries);
0228     if (ret < 0) {
0229         ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
0230                 ret, ring_num);
0231         goto err;
0232     }
0233 
0234     return 0;
0235 err:
0236     ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
0237     return ret;
0238 }
0239 
0240 int ath11k_dbring_get_cap(struct ath11k_base *ab,
0241               u8 pdev_idx,
0242               enum wmi_direct_buffer_module id,
0243               struct ath11k_dbring_cap *db_cap)
0244 {
0245     int i;
0246 
0247     if (!ab->num_db_cap || !ab->db_caps)
0248         return -ENOENT;
0249 
0250     if (id >= WMI_DIRECT_BUF_MAX)
0251         return -EINVAL;
0252 
0253     for (i = 0; i < ab->num_db_cap; i++) {
0254         if (pdev_idx == ab->db_caps[i].pdev_id &&
0255             id == ab->db_caps[i].id) {
0256             *db_cap = ab->db_caps[i];
0257 
0258             return 0;
0259         }
0260     }
0261 
0262     return -ENOENT;
0263 }
0264 
/* Handle a WMI "DMA buffer release" event from firmware.
 *
 * For each released entry: look up the element by the buffer id encoded
 * in the descriptor cookie, unmap it from DMA, pass the (aligned)
 * payload to the module's registered handler, then wipe and immediately
 * replenish the buffer back to the hardware.
 *
 * Returns 0 on success or -EINVAL for malformed events (bad pdev id,
 * mismatched entry counts, inactive pdev, or unsupported module).
 */
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm, module_id;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = ev->fixed.pdev_id;
	module_id = ev->fixed.module_id;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	/* Each buffer entry must come with a matching metadata entry. */
	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	/* Hold the RCU read lock so the pdev cannot go away while the
	 * event is being processed.
	 */
	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	/* srng->lock is required by ath11k_dbring_bufs_replenish() below. */
	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		/* Unknown buf_id: stale or duplicate release; skip it. */
		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		/* Unmap before the CPU touches the payload. */
		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		ath11k_debugfs_add_dbring_entry(ar, module_id,
						ATH11K_DBG_DBR_EVENT_RX, srng);

		if (ring->handler) {
			/* Hand over the aligned view of the payload, the
			 * same one the device DMA-wrote into.
			 */
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		/* Recycle the element: clear it and give it back to HW. */
		buff->paddr = 0;
		memset(buff->payload, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}
0377 
/* Free the DP SRNG backing the ring's refill queue (counterpart of
 * ath11k_dbring_srng_setup()).
 */
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}
0382 
/* Release every outstanding buffer element still tracked by the ring's
 * IDR: remove it, unmap its DMA buffer, free payload and element, then
 * destroy the IDR itself. Counterpart of ath11k_dbring_buf_setup().
 */
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff->payload);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}