0001
0002
0003
0004
0005
0006
0007 #include "dp_rx.h"
0008 #include "debug.h"
0009 #include "hif.h"
0010
/* Host-side Copy Engine attributes for IPQ8074-family targets.
 *
 * Reading the table:
 *   src_nentries  != 0 -> host->target pipe; send_cb (if set) runs on
 *                         tx completion.
 *   dest_nentries != 0 -> target->host pipe; recv_cb delivers the payload.
 *   CE_ATTR_DIS_INTR   -> completion interrupts disabled; pipe is polled.
 */
const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target, HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE1: target->host, HTC/HTT responses (512 rx buffers) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host, HTC events (e.g. WMI) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target, HTC commands */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target, high-volume tx (2048 small entries), polled:
	 * interrupts disabled, completions reaped via usage threshold.
	 */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host, HTT T2H messages */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: unused by host (no entries in either direction) */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target, second tx control pipe */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: unused by host */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target, third tx control pipe */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE10: target->host rx */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
0116
/* Host-side Copy Engine attributes for QCA6390-family targets.
 * Same table conventions as ath11k_host_ce_config_ipq8074; note CE0 has
 * no send_cb here (tx completions for CE0 are not delivered to HTC).
 */
const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target, HTC control and raw streams (no send_cb) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host rx */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host rx */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target, HTC commands */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target, high-volume polled tx (interrupts disabled) */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host, HTT T2H messages */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: unused by host */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target, second tx control pipe */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: unused by host */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

};
0196
/* Host-side Copy Engine attributes for QCN9074-family targets.
 * Same table conventions as ath11k_host_ce_config_ipq8074; only six CEs
 * are used, and CE2 has a smaller rx ring (32 buffers) than the other
 * chip variants.
 */
const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
	/* CE0: host->target, HTC control and raw streams (no send_cb) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host rx */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host rx, small ring (32 buffers) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target, HTC commands */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target, high-volume polled tx (interrupts disabled) */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host, HTT T2H messages */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},
};
0250
/* Report whether @ce_id requires the shadow-register head-pointer timer
 * workaround. Only CE4 (the polled, interrupt-disabled tx pipe) does.
 */
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* Only CE4 needs the shadow register fix */
	return ce_id == 4;
}
0258
0259 void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
0260 {
0261 int i;
0262
0263 if (!ab->hw_params.supports_shadow_regs)
0264 return;
0265
0266 for (i = 0; i < ab->hw_params.ce_count; i++)
0267 if (ath11k_ce_need_shadow_fix(i))
0268 ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
0269 }
0270
/* Post one rx buffer (@skb, already DMA-mapped to @paddr) to @pipe's
 * destination ring.
 *
 * Caller must hold ab->ce.ce_lock (asserted below); the SRNG lock is
 * taken here, giving the ce_lock -> srng->lock order used throughout
 * this file.
 *
 * Returns 0 on success or -ENOSPC when the ring has no free entry; on
 * failure the caller keeps ownership of @skb.
 */
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	/* Need at least one free source entry in the HAL ring */
	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	/* Write the DMA address into the descriptor, then record skb
	 * ownership at the matching software index.
	 */
	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	/* One fewer buffer outstanding for this pipe */
	pipe->rx_buf_needed--;

	ret = 0;
exit:
	/* access_end commits (or abandons) the ring update before unlock */
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
0319
/* Replenish @pipe's rx ring: allocate, DMA-map and enqueue skbs until
 * pipe->rx_buf_needed reaches zero.
 *
 * Returns 0 on success (or when the pipe has no rx rings); -ENOMEM on
 * allocation failure, -EIO on DMA-mapping failure, or the enqueue error
 * (e.g. -ENOSPC). Stops at the first failure; partially posted buffers
 * remain on the ring.
 */
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	/* tx-only pipes have neither dest nor status ring - nothing to post */
	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		/* CE hardware expects 4-byte-aligned buffers */
		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		/* Stash the mapping so completion/cleanup can unmap it */
		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);

		if (ret) {
			/* Enqueue failed: undo the mapping and free the skb */
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}
0368
/* Pop the next completed rx buffer from @pipe.
 *
 * On success returns 0 with *skb set to the completed buffer (still
 * DMA-mapped; caller unmaps) and *nbytes set to the length reported by
 * the destination status descriptor. Returns -EIO when no completion is
 * pending or the reported length is zero.
 *
 * Takes ce_lock then the status ring's srng lock (same order as the
 * enqueue path).
 */
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	/* Completions are reported on the status ring, not the dest ring */
	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	/* Zero-length status entries are treated as errors */
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	/* Hand the skb at sw_index to the caller and advance the ring.
	 * NOTE(review): sw_index is only advanced on success, so an -EIO
	 * above leaves the dest ring state untouched.
	 */
	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	/* The consumed buffer must eventually be replenished */
	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
0418
/* Drain all completed rx buffers from @pipe, deliver them to the pipe's
 * recv_cb, then replenish the rx ring. Buffers whose reported length
 * exceeds the mapped size are dropped with a warning. If replenishing
 * fails (other than -ENOSPC), the rx_replenish_retry timer is armed to
 * retry later.
 */
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	/* Collect completions under the locks first, deliver afterwards
	 * so recv_cb runs without ce_lock/srng lock held.
	 */
	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* Hardware reported more data than the buffer can hold */
		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	/* Refill the buffers just consumed */
	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}
0458
/* Reap the next completed tx descriptor from @pipe's source ring.
 *
 * Returns the skb that was posted at the reaped index (may be NULL if
 * no skb was recorded there), or ERR_PTR(-EIO) when nothing has
 * completed. Takes ce_lock then the source ring's srng lock.
 */
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	/* reap (not get) - descriptor is consumed without re-posting */
	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}
0499
/* Process tx completions for @pipe: unmap each completed skb, then
 * either free it (no send_cb registered, or credit-flow mode where HTC
 * handles completions via credits) or hand it to the pipe's send_cb.
 * Delivery happens outside the ring locks.
 */
static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		/* A reaped entry may have no skb recorded (e.g. CE4) */
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);

		/* With credit flow enabled the completion is not reported
		 * through send_cb; just release the buffer.
		 */
		if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
			dev_kfree_skb_any(skb);
			continue;
		}

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "tx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->send_cb(ab, skb);
	}
}
0528
/* Fill MSI address/data into @ring_params for CE @ce_id so the SRNG can
 * raise MSI interrupts. Silently leaves @ring_params untouched (so the
 * ring runs without MSI) when no MSI vectors are assigned for "CE".
 */
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);

	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
	ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	/* Combine the 32-bit halves into the 64-bit MSI target address */
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	/* CEs share the vector pool round-robin via the modulo */
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
0554
0555 static int ath11k_ce_init_ring(struct ath11k_base *ab,
0556 struct ath11k_ce_ring *ce_ring,
0557 int ce_id, enum hal_ring_type type)
0558 {
0559 struct hal_srng_params params = { 0 };
0560 int ret;
0561
0562 params.ring_base_paddr = ce_ring->base_addr_ce_space;
0563 params.ring_base_vaddr = ce_ring->base_addr_owner_space;
0564 params.num_entries = ce_ring->nentries;
0565
0566 if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
0567 ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, ¶ms);
0568
0569 switch (type) {
0570 case HAL_CE_SRC:
0571 if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
0572 params.intr_batch_cntr_thres_entries = 1;
0573 break;
0574 case HAL_CE_DST:
0575 params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
0576 if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
0577 params.intr_timer_thres_us = 1024;
0578 params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
0579 params.low_threshold = ce_ring->nentries - 3;
0580 }
0581 break;
0582 case HAL_CE_DST_STATUS:
0583 if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
0584 params.intr_batch_cntr_thres_entries = 1;
0585 params.intr_timer_thres_us = 0x1000;
0586 }
0587 break;
0588 default:
0589 ath11k_warn(ab, "Invalid CE ring type %d\n", type);
0590 return -EINVAL;
0591 }
0592
0593
0594
0595 ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, ¶ms);
0596 if (ret < 0) {
0597 ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
0598 ret, ce_id);
0599 return ret;
0600 }
0601
0602 ce_ring->hal_ring_id = ret;
0603
0604 if (ab->hw_params.supports_shadow_regs &&
0605 ath11k_ce_need_shadow_fix(ce_id))
0606 ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
0607 ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
0608 ce_ring->hal_ring_id);
0609
0610 return 0;
0611 }
0612
/* Allocate an ath11k_ce_ring with @nentries slots of @desc_sz bytes.
 *
 * The metadata struct (including the trailing skb[] array) comes from
 * kzalloc; the descriptor area is DMA-coherent memory over-allocated by
 * CE_DESC_RING_ALIGN so both the CPU and device views can be aligned.
 *
 * Returns the ring or ERR_PTR(-ENOMEM). Caller frees via dma_free_coherent
 * on the *_unaligned members plus kfree (see ath11k_ce_free_pipes).
 *
 * NOTE(review): nentries_mask assumes @nentries is a power of two;
 * callers guarantee this via roundup_pow_of_two().
 */
static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (ce_ring == NULL)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Over-allocate so the descriptor base can be aligned up to
	 * CE_DESC_RING_ALIGN in both address spaces.
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	/* Aligned CPU (virtual) and device (bus) views of the ring */
	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}
0649
/* Allocate the rings for CE @ce_id according to its ce_attr: a source
 * ring when src_nentries is set, and a dest + dest-status ring pair when
 * dest_nentries is set. Ring sizes are rounded up to a power of two.
 *
 * Returns 0 or a negative errno. On failure, rings already allocated
 * remain attached to the pipe; the caller (ath11k_ce_alloc_pipes) frees
 * them via ath11k_ce_free_pipes().
 */
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		/* Status ring mirrors the dest ring entry count */
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}
0688
0689 void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
0690 {
0691 struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
0692 const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
0693
0694 if (attr->src_nentries)
0695 ath11k_ce_tx_process_cb(pipe);
0696
0697 if (pipe->recv_cb)
0698 ath11k_ce_recv_process_cb(pipe);
0699 }
0700
/* Poll-mode tx completion reaping for @pipe_id. Only acts on pipes that
 * run with interrupts disabled (CE_ATTR_DIS_INTR) and have a source
 * ring; used by ath11k_ce_send() when the polled ring fills up, and by
 * pipe cleanup.
 */
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);
}
/* NOTE(review): this exports ath11k_ce_per_engine_service (defined
 * above), not the function immediately preceding it.
 */
EXPORT_SYMBOL(ath11k_ce_per_engine_service);
0710
/* Queue @skb (already DMA-mapped; ATH11K_SKB_CB(skb)->paddr must be set)
 * for transmission on CE @pipe_id with HTC endpoint @transfer_id.
 *
 * Returns 0 on success, -ESHUTDOWN when a crash flush is in progress,
 * or -ENOBUFS when the source ring has no free entry. On failure the
 * caller keeps ownership of @skb.
 */
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx
	 * completions when the CE has interrupts disabled and the number
	 * of used entries exceeds the threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		/* Ring occupancy, accounting for wrap-around */
		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	/* "next_reaped" pairs with the reap done in completed_send_next */
	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	/* Record skb ownership before publishing the new write index */
	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	/* Shadow-register CEs defer the head-pointer update via a timer */
	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
0800
0801 static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
0802 {
0803 struct ath11k_base *ab = pipe->ab;
0804 struct ath11k_ce_ring *ring = pipe->dest_ring;
0805 struct sk_buff *skb;
0806 int i;
0807
0808 if (!(ring && pipe->buf_sz))
0809 return;
0810
0811 for (i = 0; i < ring->nentries; i++) {
0812 skb = ring->skb[i];
0813 if (!skb)
0814 continue;
0815
0816 ring->skb[i] = NULL;
0817 dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
0818 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
0819 dev_kfree_skb_any(skb);
0820 }
0821 }
0822
0823 static void ath11k_ce_shadow_config(struct ath11k_base *ab)
0824 {
0825 int i;
0826
0827 for (i = 0; i < ab->hw_params.ce_count; i++) {
0828 if (ab->hw_params.host_ce_config[i].src_nentries)
0829 ath11k_hal_srng_update_shadow_config(ab,
0830 HAL_CE_SRC, i);
0831
0832 if (ab->hw_params.host_ce_config[i].dest_nentries) {
0833 ath11k_hal_srng_update_shadow_config(ab,
0834 HAL_CE_DST, i);
0835
0836 ath11k_hal_srng_update_shadow_config(ab,
0837 HAL_CE_DST_STATUS, i);
0838 }
0839 }
0840 }
0841
/* Return the current shadow-register configuration table in *shadow_cfg
 * / *shadow_cfg_len, building it first (HAL + CE rings) if it has not
 * been configured yet. No-op on hardware without shadow-register
 * support; *shadow_cfg / *shadow_cfg_len are then left untouched.
 */
void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * HAL srng related config doesn't change, so only configure once.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
0865
/* Quiesce all CE pipes: stop shadow timers, free posted rx buffers and
 * reap outstanding tx completions on polled (interrupt-disabled) pipes.
 * Ring memory itself is released separately by ath11k_ce_free_pipes().
 */
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	ath11k_ce_stop_shadow_timers(ab);

	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: tx buffers on interrupt-driven pipes are reclaimed
		 * through their normal completion path, not here.
		 */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
0884
/* Replenish the rx rings of every CE pipe. -ENOSPC (ring already full)
 * is ignored and the next pipe is tried; any other failure arms the
 * rx_replenish_retry timer and aborts the remaining pipes.
 */
void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
0908
/* Timer callback: retry posting rx buffers after an earlier replenish
 * failure (armed via ab->rx_replenish_retry).
 */
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}
0915
0916 int ath11k_ce_init_pipes(struct ath11k_base *ab)
0917 {
0918 struct ath11k_ce_pipe *pipe;
0919 int i;
0920 int ret;
0921
0922 for (i = 0; i < ab->hw_params.ce_count; i++) {
0923 pipe = &ab->ce.ce_pipe[i];
0924
0925 if (pipe->src_ring) {
0926 ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
0927 HAL_CE_SRC);
0928 if (ret) {
0929 ath11k_warn(ab, "failed to init src ring: %d\n",
0930 ret);
0931
0932 return ret;
0933 }
0934
0935 pipe->src_ring->write_index = 0;
0936 pipe->src_ring->sw_index = 0;
0937 }
0938
0939 if (pipe->dest_ring) {
0940 ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
0941 HAL_CE_DST);
0942 if (ret) {
0943 ath11k_warn(ab, "failed to init dest ring: %d\n",
0944 ret);
0945
0946 return ret;
0947 }
0948
0949 pipe->rx_buf_needed = pipe->dest_ring->nentries ?
0950 pipe->dest_ring->nentries - 2 : 0;
0951
0952 pipe->dest_ring->write_index = 0;
0953 pipe->dest_ring->sw_index = 0;
0954 }
0955
0956 if (pipe->status_ring) {
0957 ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
0958 HAL_CE_DST_STATUS);
0959 if (ret) {
0960 ath11k_warn(ab, "failed to init dest status ing: %d\n",
0961 ret);
0962
0963 return ret;
0964 }
0965
0966 pipe->status_ring->write_index = 0;
0967 pipe->status_ring->sw_index = 0;
0968 }
0969 }
0970
0971 return 0;
0972 }
0973
0974 void ath11k_ce_free_pipes(struct ath11k_base *ab)
0975 {
0976 struct ath11k_ce_pipe *pipe;
0977 struct ath11k_ce_ring *ce_ring;
0978 int desc_sz;
0979 int i;
0980
0981 for (i = 0; i < ab->hw_params.ce_count; i++) {
0982 pipe = &ab->ce.ce_pipe[i];
0983
0984 if (ath11k_ce_need_shadow_fix(i))
0985 ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
0986
0987 if (pipe->src_ring) {
0988 desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
0989 ce_ring = pipe->src_ring;
0990 dma_free_coherent(ab->dev,
0991 pipe->src_ring->nentries * desc_sz +
0992 CE_DESC_RING_ALIGN,
0993 ce_ring->base_addr_owner_space_unaligned,
0994 ce_ring->base_addr_ce_space_unaligned);
0995 kfree(pipe->src_ring);
0996 pipe->src_ring = NULL;
0997 }
0998
0999 if (pipe->dest_ring) {
1000 desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
1001 ce_ring = pipe->dest_ring;
1002 dma_free_coherent(ab->dev,
1003 pipe->dest_ring->nentries * desc_sz +
1004 CE_DESC_RING_ALIGN,
1005 ce_ring->base_addr_owner_space_unaligned,
1006 ce_ring->base_addr_ce_space_unaligned);
1007 kfree(pipe->dest_ring);
1008 pipe->dest_ring = NULL;
1009 }
1010
1011 if (pipe->status_ring) {
1012 desc_sz =
1013 ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
1014 ce_ring = pipe->status_ring;
1015 dma_free_coherent(ab->dev,
1016 pipe->status_ring->nentries * desc_sz +
1017 CE_DESC_RING_ALIGN,
1018 ce_ring->base_addr_owner_space_unaligned,
1019 ce_ring->base_addr_ce_space_unaligned);
1020 kfree(pipe->status_ring);
1021 pipe->status_ring = NULL;
1022 }
1023 }
1024 }
1025 EXPORT_SYMBOL(ath11k_ce_free_pipes);
1026
/* Initialize the CE lock and allocate rings for every CE pipe from the
 * per-chip host_ce_config table. On failure, everything allocated so
 * far is released via ath11k_ce_free_pipes() before returning the
 * error.
 */
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		/* rx buffer size for this pipe */
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partial resource allocated so far */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
1054
1055
1056
1057
1058
1059 void ath11k_ce_byte_swap(void *mem, u32 len)
1060 {
1061 int i;
1062
1063 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
1064 if (!mem)
1065 return;
1066
1067 for (i = 0; i < (len / 4); i++) {
1068 *(u32 *)mem = swab32(*(u32 *)mem);
1069 mem += 4;
1070 }
1071 }
1072 }
1073
1074 int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
1075 {
1076 if (ce_id >= ab->hw_params.ce_count)
1077 return -EINVAL;
1078
1079 return ab->hw_params.host_ce_config[ce_id].flags;
1080 }
1081 EXPORT_SYMBOL(ath11k_ce_get_attr_flags);