0008 #include "hif.h"
0009 #include "ce.h"
0010 #include "debug.h"
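/*
 * Copy Engine (CE) support.
 *
 * A copy engine is a DMA channel between the host and the ath10k target.
 * Each CE pipe has a source ring (for sends from the host) and/or a
 * destination ring (for receives into host buffers).  The helpers below
 * program the per-CE control registers, post and reap descriptors, and
 * service the per-engine interrupts.
 */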
0050
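/*
 * Map a copy engine id to the address of the shadow copy of its ring write
 * index.  Only the CEs wired up for shadow register support have an entry;
 * any other id is reported as invalid.
 */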
0051 static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
0052 struct ath10k_ce_pipe *ce_state)
0053 {
0054 u32 ce_id = ce_state->id;
0055 u32 addr = 0;
0056
0057 switch (ce_id) {
0058 case 0:
0059 addr = 0x00032000;
0060 break;
0061 case 3:
0062 addr = 0x0003200C;
0063 break;
0064 case 4:
0065 addr = 0x00032010;
0066 break;
0067 case 5:
0068 addr = 0x00032014;
0069 break;
0070 case 7:
0071 addr = 0x0003201C;
0072 break;
0073 default:
0074 ath10k_warn(ar, "invalid CE id: %d", ce_id);
0075 break;
0076 }
0077 return addr;
0078 }
0079
0080 static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar,
0081 struct ath10k_ce_pipe *ce_state)
0082 {
0083 u32 ce_id = ce_state->id;
0084 u32 addr = 0;
0085
0086 switch (ce_id) {
0087 case 1:
0088 addr = 0x00032034;
0089 break;
0090 case 2:
0091 addr = 0x00032038;
0092 break;
0093 case 5:
0094 addr = 0x00032044;
0095 break;
0096 case 7:
0097 addr = 0x0003204C;
0098 break;
0099 case 8:
0100 addr = 0x00032050;
0101 break;
0102 case 9:
0103 addr = 0x00032054;
0104 break;
0105 case 10:
0106 addr = 0x00032058;
0107 break;
0108 case 11:
0109 addr = 0x0003205C;
0110 break;
0111 default:
0112 ath10k_warn(ar, "invalid CE id: %d", ce_id);
0113 break;
0114 }
0115
0116 return addr;
0117 }
0118
0119 static inline unsigned int
0120 ath10k_set_ring_byte(unsigned int offset,
0121 struct ath10k_hw_ce_regs_addr_map *addr_map)
0122 {
0123 return ((offset << addr_map->lsb) & addr_map->mask);
0124 }
0125
0126 static inline unsigned int
0127 ath10k_get_ring_byte(unsigned int offset,
0128 struct ath10k_hw_ce_regs_addr_map *addr_map)
0129 {
0130 return ((offset & addr_map->mask) >> (addr_map->lsb));
0131 }
0132
0133 static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
0134 {
0135 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0136
0137 return ce->bus_ops->read32(ar, offset);
0138 }
0139
0140 static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
0141 {
0142 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0143
0144 ce->bus_ops->write32(ar, offset, value);
0145 }
0146
0147 static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
0148 u32 ce_ctrl_addr,
0149 unsigned int n)
0150 {
0151 ath10k_ce_write32(ar, ce_ctrl_addr +
0152 ar->hw_ce_regs->dst_wr_index_addr, n);
0153 }
0154
0155 static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
0156 u32 ce_ctrl_addr)
0157 {
0158 return ath10k_ce_read32(ar, ce_ctrl_addr +
0159 ar->hw_ce_regs->dst_wr_index_addr);
0160 }
0161
0162 static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
0163 u32 ce_ctrl_addr,
0164 unsigned int n)
0165 {
0166 ath10k_ce_write32(ar, ce_ctrl_addr +
0167 ar->hw_ce_regs->sr_wr_index_addr, n);
0168 }
0169
0170 static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
0171 u32 ce_ctrl_addr)
0172 {
0173 return ath10k_ce_read32(ar, ce_ctrl_addr +
0174 ar->hw_ce_regs->sr_wr_index_addr);
0175 }
0176
0177 static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
0178 u32 ce_id)
0179 {
0180 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0181
0182 return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
0183 }
0184
0185 static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
0186 u32 ce_ctrl_addr)
0187 {
0188 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0189 u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
0190 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
0191 u32 index;
0192
0193 if (ar->hw_params.rri_on_ddr &&
0194 (ce_state->attr_flags & CE_ATTR_DIS_INTR))
0195 index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
0196 else
0197 index = ath10k_ce_read32(ar, ce_ctrl_addr +
0198 ar->hw_ce_regs->current_srri_addr);
0199
0200 return index;
0201 }
0202
0203 static inline void
0204 ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
0205 struct ath10k_ce_pipe *ce_state,
0206 unsigned int value)
0207 {
0208 ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
0209 }
0210
0211 static inline void
0212 ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
0213 struct ath10k_ce_pipe *ce_state,
0214 unsigned int value)
0215 {
0216 ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
0217 }
0218
0219 static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
0220 u32 ce_id,
0221 u64 addr)
0222 {
0223 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0224 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
0225 u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
0226 u32 addr_lo = lower_32_bits(addr);
0227
0228 ath10k_ce_write32(ar, ce_ctrl_addr +
0229 ar->hw_ce_regs->sr_base_addr_lo, addr_lo);
0230
0231 if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
0232 ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
0233 addr);
0234 }
0235 }
0236
0237 static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
0238 u32 ce_ctrl_addr,
0239 u64 addr)
0240 {
0241 u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
0242
0243 ath10k_ce_write32(ar, ce_ctrl_addr +
0244 ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
0245 }
0246
0247 static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
0248 u32 ce_ctrl_addr,
0249 unsigned int n)
0250 {
0251 ath10k_ce_write32(ar, ce_ctrl_addr +
0252 ar->hw_ce_regs->sr_size_addr, n);
0253 }
0254
0255 static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
0256 u32 ce_ctrl_addr,
0257 unsigned int n)
0258 {
0259 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
0260
0261 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
0262 ctrl_regs->addr);
0263
0264 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
0265 (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
0266 ath10k_set_ring_byte(n, ctrl_regs->dmax));
0267 }
0268
0269 static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
0270 u32 ce_ctrl_addr,
0271 unsigned int n)
0272 {
0273 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
0274
0275 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
0276 ctrl_regs->addr);
0277
0278 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
0279 (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
0280 ath10k_set_ring_byte(n, ctrl_regs->src_ring));
0281 }
0282
0283 static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
0284 u32 ce_ctrl_addr,
0285 unsigned int n)
0286 {
0287 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
0288
0289 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
0290 ctrl_regs->addr);
0291
0292 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
0293 (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
0294 ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
0295 }
0296
0297 static inline
0298 u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
0299 {
0300 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0301
0302 return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
0303 CE_DDR_RRI_MASK;
0304 }
0305
0306 static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
0307 u32 ce_ctrl_addr)
0308 {
0309 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0310 u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
0311 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
0312 u32 index;
0313
0314 if (ar->hw_params.rri_on_ddr &&
0315 (ce_state->attr_flags & CE_ATTR_DIS_INTR))
0316 index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
0317 else
0318 index = ath10k_ce_read32(ar, ce_ctrl_addr +
0319 ar->hw_ce_regs->current_drri_addr);
0320
0321 return index;
0322 }
0323
0324 static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
0325 u32 ce_id,
0326 u64 addr)
0327 {
0328 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0329 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
0330 u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
0331 u32 addr_lo = lower_32_bits(addr);
0332
0333 ath10k_ce_write32(ar, ce_ctrl_addr +
0334 ar->hw_ce_regs->dr_base_addr_lo, addr_lo);
0335
0336 if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
0337 ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
0338 addr);
0339 }
0340 }
0341
0342 static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
0343 u32 ce_ctrl_addr,
0344 u64 addr)
0345 {
0346 u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
0347 u32 reg_value;
0348
0349 reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
0350 ar->hw_ce_regs->dr_base_addr_hi);
0351 reg_value &= ~CE_DESC_ADDR_HI_MASK;
0352 reg_value |= addr_hi;
0353 ath10k_ce_write32(ar, ce_ctrl_addr +
0354 ar->hw_ce_regs->dr_base_addr_hi, reg_value);
0355 }
0356
0357 static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
0358 u32 ce_ctrl_addr,
0359 unsigned int n)
0360 {
0361 ath10k_ce_write32(ar, ce_ctrl_addr +
0362 ar->hw_ce_regs->dr_size_addr, n);
0363 }
0364
0365 static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
0366 u32 ce_ctrl_addr,
0367 unsigned int n)
0368 {
0369 struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
0370 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
0371
0372 ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
0373 (addr & ~(srcr_wm->wm_high->mask)) |
0374 (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
0375 }
0376
0377 static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
0378 u32 ce_ctrl_addr,
0379 unsigned int n)
0380 {
0381 struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
0382 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
0383
0384 ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
0385 (addr & ~(srcr_wm->wm_low->mask)) |
0386 (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
0387 }
0388
0389 static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
0390 u32 ce_ctrl_addr,
0391 unsigned int n)
0392 {
0393 struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
0394 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
0395
0396 ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
0397 (addr & ~(dstr_wm->wm_high->mask)) |
0398 (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
0399 }
0400
0401 static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
0402 u32 ce_ctrl_addr,
0403 unsigned int n)
0404 {
0405 struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
0406 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
0407
0408 ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
0409 (addr & ~(dstr_wm->wm_low->mask)) |
0410 (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
0411 }
0412
0413 static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
0414 u32 ce_ctrl_addr)
0415 {
0416 struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
0417
0418 u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
0419 ar->hw_ce_regs->host_ie_addr);
0420
0421 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
0422 host_ie_addr | host_ie->copy_complete->mask);
0423 }
0424
0425 static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
0426 u32 ce_ctrl_addr)
0427 {
0428 struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
0429
0430 u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
0431 ar->hw_ce_regs->host_ie_addr);
0432
0433 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
0434 host_ie_addr & ~(host_ie->copy_complete->mask));
0435 }
0436
0437 static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
0438 u32 ce_ctrl_addr)
0439 {
0440 struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
0441
0442 u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
0443 ar->hw_ce_regs->host_ie_addr);
0444
0445 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
0446 host_ie_addr & ~(wm_regs->wm_mask));
0447 }
0448
0449 static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
0450 u32 ce_ctrl_addr)
0451 {
0452 struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
0453
0454 u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
0455 ar->hw_ce_regs->misc_ie_addr);
0456
0457 ath10k_ce_write32(ar,
0458 ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
0459 misc_ie_addr | misc_regs->err_mask);
0460 }
0461
0462 static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
0463 u32 ce_ctrl_addr)
0464 {
0465 struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
0466
0467 u32 misc_ie_addr = ath10k_ce_read32(ar,
0468 ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
0469
0470 ath10k_ce_write32(ar,
0471 ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
0472 misc_ie_addr & ~(misc_regs->err_mask));
0473 }
0474
0475 static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
0476 u32 ce_ctrl_addr,
0477 unsigned int mask)
0478 {
0479 struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
0480
0481 ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
0482 }
0483
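/*
 * Guts of the send path: fill in one source ring descriptor and, unless this
 * is an intermediate gather descriptor, publish the new write index to the
 * hardware.  Callers handle locking themselves (see ath10k_ce_send(), which
 * wraps this under ce_lock).
 */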
0488 static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
0489 void *per_transfer_context,
0490 dma_addr_t buffer,
0491 unsigned int nbytes,
0492 unsigned int transfer_id,
0493 unsigned int flags)
0494 {
0495 struct ath10k *ar = ce_state->ar;
0496 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
0497 struct ce_desc *desc, sdesc;
0498 unsigned int nentries_mask = src_ring->nentries_mask;
0499 unsigned int sw_index = src_ring->sw_index;
0500 unsigned int write_index = src_ring->write_index;
0501 u32 ctrl_addr = ce_state->ctrl_addr;
0502 u32 desc_flags = 0;
0503 int ret = 0;
0504
0505 if (nbytes > ce_state->src_sz_max)
0506 ath10k_warn(ar, "%s: sending more than allowed (nbytes: %d, max: %d)\n",
0507 __func__, nbytes, ce_state->src_sz_max);
0508
0509 if (unlikely(CE_RING_DELTA(nentries_mask,
0510 write_index, sw_index - 1) <= 0)) {
0511 ret = -ENOSR;
0512 goto exit;
0513 }
0514
0515 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
0516 write_index);
0517
0518 desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
0519
0520 if (flags & CE_SEND_FLAG_GATHER)
0521 desc_flags |= CE_DESC_FLAGS_GATHER;
0522 if (flags & CE_SEND_FLAG_BYTE_SWAP)
0523 desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
0524
0525 sdesc.addr = __cpu_to_le32(buffer);
0526 sdesc.nbytes = __cpu_to_le16(nbytes);
0527 sdesc.flags = __cpu_to_le16(desc_flags);
0528
0529 *desc = sdesc;
0530
0531 src_ring->per_transfer_context[write_index] = per_transfer_context;
0532
0533
0534 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
0535
0536
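/* Intermediate descriptors of a gather sequence are not made visible to the
 * hardware yet; the write index is only published once a descriptor without
 * the GATHER flag is posted.
 */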
0537 if (!(flags & CE_SEND_FLAG_GATHER))
0538 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
0539
0540 src_ring->write_index = write_index;
0541 exit:
0542 return ret;
0543 }
0544
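/* 64-bit descriptor variant of _ath10k_ce_send_nolock(), used by targets
 * such as WCN3990.
 */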
0545 static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
0546 void *per_transfer_context,
0547 dma_addr_t buffer,
0548 unsigned int nbytes,
0549 unsigned int transfer_id,
0550 unsigned int flags)
0551 {
0552 struct ath10k *ar = ce_state->ar;
0553 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
0554 struct ce_desc_64 *desc, sdesc;
0555 unsigned int nentries_mask = src_ring->nentries_mask;
0556 unsigned int sw_index;
0557 unsigned int write_index = src_ring->write_index;
0558 u32 ctrl_addr = ce_state->ctrl_addr;
0559 __le32 *addr;
0560 u32 desc_flags = 0;
0561 int ret = 0;
0562
0563 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
0564 return -ESHUTDOWN;
0565
0566 if (nbytes > ce_state->src_sz_max)
0567 ath10k_warn(ar, "%s: sending more than allowed (nbytes: %d, max: %d)\n",
0568 __func__, nbytes, ce_state->src_sz_max);
0569
0570 if (ar->hw_params.rri_on_ddr)
0571 sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
0572 else
0573 sw_index = src_ring->sw_index;
0574
0575 if (unlikely(CE_RING_DELTA(nentries_mask,
0576 write_index, sw_index - 1) <= 0)) {
0577 ret = -ENOSR;
0578 goto exit;
0579 }
0580
0581 desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
0582 write_index);
0583
0584 desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
0585
0586 if (flags & CE_SEND_FLAG_GATHER)
0587 desc_flags |= CE_DESC_FLAGS_GATHER;
0588
0589 if (flags & CE_SEND_FLAG_BYTE_SWAP)
0590 desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
0591
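/* The 64-bit descriptor address is written as two 32-bit words: the low word
 * holds the lower 32 bits of the DMA address, the high word carries the
 * remaining address bits together with the descriptor flag bits.
 */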
0592 addr = (__le32 *)&sdesc.addr;
0593
0594 flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
0595 addr[0] = __cpu_to_le32(buffer);
0596 addr[1] = __cpu_to_le32(flags);
0597 if (flags & CE_SEND_FLAG_GATHER)
0598 addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
0599 else
0600 addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
0601
0602 sdesc.nbytes = __cpu_to_le16(nbytes);
0603 sdesc.flags = __cpu_to_le16(desc_flags);
0604
0605 *desc = sdesc;
0606
0607 src_ring->per_transfer_context[write_index] = per_transfer_context;
0608
0609
0610 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
0611
0612 if (!(flags & CE_SEND_FLAG_GATHER)) {
0613 if (ar->hw_params.shadow_reg_support)
0614 ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
0615 write_index);
0616 else
0617 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
0618 write_index);
0619 }
0620
0621 src_ring->write_index = write_index;
0622 exit:
0623 return ret;
0624 }
0625
0626 int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
0627 void *per_transfer_context,
0628 dma_addr_t buffer,
0629 unsigned int nbytes,
0630 unsigned int transfer_id,
0631 unsigned int flags)
0632 {
0633 return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
0634 buffer, nbytes, transfer_id, flags);
0635 }
0636 EXPORT_SYMBOL(ath10k_ce_send_nolock);
0637
0638 void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
0639 {
0640 struct ath10k *ar = pipe->ar;
0641 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0642 struct ath10k_ce_ring *src_ring = pipe->src_ring;
0643 u32 ctrl_addr = pipe->ctrl_addr;
0644
0645 lockdep_assert_held(&ce->ce_lock);
0646
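/*
 * Undo the most recently filled source ring descriptor.  This is only safe
 * while the new write index has not been published to the hardware, which
 * the WARN_ON_ONCE checks below enforce.
 */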
0652 if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
0653 return;
0654
0655 if (WARN_ON_ONCE(src_ring->write_index ==
0656 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
0657 return;
0658
0659 src_ring->write_index--;
0660 src_ring->write_index &= src_ring->nentries_mask;
0661
0662 src_ring->per_transfer_context[src_ring->write_index] = NULL;
0663 }
0664 EXPORT_SYMBOL(__ath10k_ce_send_revert);
0665
0666 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
0667 void *per_transfer_context,
0668 dma_addr_t buffer,
0669 unsigned int nbytes,
0670 unsigned int transfer_id,
0671 unsigned int flags)
0672 {
0673 struct ath10k *ar = ce_state->ar;
0674 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0675 int ret;
0676
0677 spin_lock_bh(&ce->ce_lock);
0678 ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
0679 buffer, nbytes, transfer_id, flags);
0680 spin_unlock_bh(&ce->ce_lock);
0681
0682 return ret;
0683 }
0684 EXPORT_SYMBOL(ath10k_ce_send);
0685
0686 int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
0687 {
0688 struct ath10k *ar = pipe->ar;
0689 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0690 int delta;
0691
0692 spin_lock_bh(&ce->ce_lock);
0693 delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
0694 pipe->src_ring->write_index,
0695 pipe->src_ring->sw_index - 1);
0696 spin_unlock_bh(&ce->ce_lock);
0697
0698 return delta;
0699 }
0700 EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
0701
0702 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
0703 {
0704 struct ath10k *ar = pipe->ar;
0705 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0706 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
0707 unsigned int nentries_mask = dest_ring->nentries_mask;
0708 unsigned int write_index = dest_ring->write_index;
0709 unsigned int sw_index = dest_ring->sw_index;
0710
0711 lockdep_assert_held(&ce->ce_lock);
0712
0713 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
0714 }
0715 EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
0716
0717 static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
0718 dma_addr_t paddr)
0719 {
0720 struct ath10k *ar = pipe->ar;
0721 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0722 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
0723 unsigned int nentries_mask = dest_ring->nentries_mask;
0724 unsigned int write_index = dest_ring->write_index;
0725 unsigned int sw_index = dest_ring->sw_index;
0726 struct ce_desc *base = dest_ring->base_addr_owner_space;
0727 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
0728 u32 ctrl_addr = pipe->ctrl_addr;
0729
0730 lockdep_assert_held(&ce->ce_lock);
0731
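/* The ring-full check is skipped for CE 5 (HTT rx), which recycles its
 * receive buffers in place.
 */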
0732 if ((pipe->id != 5) &&
0733 CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
0734 return -ENOSPC;
0735
0736 desc->addr = __cpu_to_le32(paddr);
0737 desc->nbytes = 0;
0738
0739 dest_ring->per_transfer_context[write_index] = ctx;
0740 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
0741 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
0742 dest_ring->write_index = write_index;
0743
0744 return 0;
0745 }
0746
0747 static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
0748 void *ctx,
0749 dma_addr_t paddr)
0750 {
0751 struct ath10k *ar = pipe->ar;
0752 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0753 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
0754 unsigned int nentries_mask = dest_ring->nentries_mask;
0755 unsigned int write_index = dest_ring->write_index;
0756 unsigned int sw_index = dest_ring->sw_index;
0757 struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
0758 struct ce_desc_64 *desc =
0759 CE_DEST_RING_TO_DESC_64(base, write_index);
0760 u32 ctrl_addr = pipe->ctrl_addr;
0761
0762 lockdep_assert_held(&ce->ce_lock);
0763
0764 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
0765 return -ENOSPC;
0766
0767 desc->addr = __cpu_to_le64(paddr);
0768 desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);
0769
0770 desc->nbytes = 0;
0771
0772 dest_ring->per_transfer_context[write_index] = ctx;
0773 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
0774 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
0775 dest_ring->write_index = write_index;
0776
0777 return 0;
0778 }
0779
0780 void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
0781 {
0782 struct ath10k *ar = pipe->ar;
0783 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
0784 unsigned int nentries_mask = dest_ring->nentries_mask;
0785 unsigned int write_index = dest_ring->write_index;
0786 u32 ctrl_addr = pipe->ctrl_addr;
0787 u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
0788
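/* Never let the write index catch up with the sw (read) index; keeping one
 * slot unused distinguishes a full ring from an empty one.
 */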
0792 if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
0793 nentries -= 1;
0794
0795 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
0796 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
0797 dest_ring->write_index = write_index;
0798 }
0799 EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
0800
0801 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
0802 dma_addr_t paddr)
0803 {
0804 struct ath10k *ar = pipe->ar;
0805 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0806 int ret;
0807
0808 spin_lock_bh(&ce->ce_lock);
0809 ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
0810 spin_unlock_bh(&ce->ce_lock);
0811
0812 return ret;
0813 }
0814 EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
0815
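/*
 * Guts of the receive-completion path: reap one completed destination ring
 * descriptor and return its byte count and per-transfer context.  Callers
 * handle locking (see ath10k_ce_completed_recv_next()).
 */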
0820 static int
0821 _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
0822 void **per_transfer_contextp,
0823 unsigned int *nbytesp)
0824 {
0825 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
0826 unsigned int nentries_mask = dest_ring->nentries_mask;
0827 unsigned int sw_index = dest_ring->sw_index;
0828
0829 struct ce_desc *base = dest_ring->base_addr_owner_space;
0830 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
0831 struct ce_desc sdesc;
0832 u16 nbytes;
0833
0834
0835 sdesc = *desc;
0836
0837 nbytes = __le16_to_cpu(sdesc.nbytes);
0838 if (nbytes == 0) {
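/* A zero byte count means the target has not finished writing this
 * descriptor back yet; report it as not (yet) completed.
 */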
0845 return -EIO;
0846 }
0847
0848 desc->nbytes = 0;
0849
0850
0851 *nbytesp = nbytes;
0852
0853 if (per_transfer_contextp)
0854 *per_transfer_contextp =
0855 dest_ring->per_transfer_context[sw_index];
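/* Copy engine 5 (HTT rx) reuses the same transfer context for every
 * completion, so only clear the context for the other engines.
 */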
0860 if (ce_state->id != 5)
0861 dest_ring->per_transfer_context[sw_index] = NULL;
0862
0863
0864 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
0865 dest_ring->sw_index = sw_index;
0866
0867 return 0;
0868 }
0869
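/* 64-bit descriptor variant of _ath10k_ce_completed_recv_next_nolock(). */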
0870 static int
0871 _ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
0872 void **per_transfer_contextp,
0873 unsigned int *nbytesp)
0874 {
0875 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
0876 unsigned int nentries_mask = dest_ring->nentries_mask;
0877 unsigned int sw_index = dest_ring->sw_index;
0878 struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
0879 struct ce_desc_64 *desc =
0880 CE_DEST_RING_TO_DESC_64(base, sw_index);
0881 struct ce_desc_64 sdesc;
0882 u16 nbytes;
0883
0884
0885 sdesc = *desc;
0886
0887 nbytes = __le16_to_cpu(sdesc.nbytes);
0888 if (nbytes == 0) {
0894 return -EIO;
0895 }
0896
0897 desc->nbytes = 0;
0898
0899
0900 *nbytesp = nbytes;
0901
0902 if (per_transfer_contextp)
0903 *per_transfer_contextp =
0904 dest_ring->per_transfer_context[sw_index];
0909 if (ce_state->id != 5)
0910 dest_ring->per_transfer_context[sw_index] = NULL;
0911
0912
0913 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
0914 dest_ring->sw_index = sw_index;
0915
0916 return 0;
0917 }
0918
0919 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
0920 void **per_transfer_ctx,
0921 unsigned int *nbytesp)
0922 {
0923 return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
0924 per_transfer_ctx,
0925 nbytesp);
0926 }
0927 EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
0928
0929 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
0930 void **per_transfer_contextp,
0931 unsigned int *nbytesp)
0932 {
0933 struct ath10k *ar = ce_state->ar;
0934 struct ath10k_ce *ce = ath10k_ce_priv(ar);
0935 int ret;
0936
0937 spin_lock_bh(&ce->ce_lock);
0938 ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
0939 per_transfer_contextp,
0940 nbytesp);
0941
0942 spin_unlock_bh(&ce->ce_lock);
0943
0944 return ret;
0945 }
0946 EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
0947
0948 static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
0949 void **per_transfer_contextp,
0950 dma_addr_t *bufferp)
0951 {
0952 struct ath10k_ce_ring *dest_ring;
0953 unsigned int nentries_mask;
0954 unsigned int sw_index;
0955 unsigned int write_index;
0956 int ret;
0957 struct ath10k *ar;
0958 struct ath10k_ce *ce;
0959
0960 dest_ring = ce_state->dest_ring;
0961
0962 if (!dest_ring)
0963 return -EIO;
0964
0965 ar = ce_state->ar;
0966 ce = ath10k_ce_priv(ar);
0967
0968 spin_lock_bh(&ce->ce_lock);
0969
0970 nentries_mask = dest_ring->nentries_mask;
0971 sw_index = dest_ring->sw_index;
0972 write_index = dest_ring->write_index;
0973 if (write_index != sw_index) {
0974 struct ce_desc *base = dest_ring->base_addr_owner_space;
0975 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
0976
0977
0978 *bufferp = __le32_to_cpu(desc->addr);
0979
0980 if (per_transfer_contextp)
0981 *per_transfer_contextp =
0982 dest_ring->per_transfer_context[sw_index];
0983
0984
0985 dest_ring->per_transfer_context[sw_index] = NULL;
0986 desc->nbytes = 0;
0987
0988
0989 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
0990 dest_ring->sw_index = sw_index;
0991 ret = 0;
0992 } else {
0993 ret = -EIO;
0994 }
0995
0996 spin_unlock_bh(&ce->ce_lock);
0997
0998 return ret;
0999 }
1000
1001 static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
1002 void **per_transfer_contextp,
1003 dma_addr_t *bufferp)
1004 {
1005 struct ath10k_ce_ring *dest_ring;
1006 unsigned int nentries_mask;
1007 unsigned int sw_index;
1008 unsigned int write_index;
1009 int ret;
1010 struct ath10k *ar;
1011 struct ath10k_ce *ce;
1012
1013 dest_ring = ce_state->dest_ring;
1014
1015 if (!dest_ring)
1016 return -EIO;
1017
1018 ar = ce_state->ar;
1019 ce = ath10k_ce_priv(ar);
1020
1021 spin_lock_bh(&ce->ce_lock);
1022
1023 nentries_mask = dest_ring->nentries_mask;
1024 sw_index = dest_ring->sw_index;
1025 write_index = dest_ring->write_index;
1026 if (write_index != sw_index) {
1027 struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
1028 struct ce_desc_64 *desc =
1029 CE_DEST_RING_TO_DESC_64(base, sw_index);
1030
1031
1032 *bufferp = __le64_to_cpu(desc->addr);
1033
1034 if (per_transfer_contextp)
1035 *per_transfer_contextp =
1036 dest_ring->per_transfer_context[sw_index];
1037
1038
1039 dest_ring->per_transfer_context[sw_index] = NULL;
1040 desc->nbytes = 0;
1041
1042
1043 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1044 dest_ring->sw_index = sw_index;
1045 ret = 0;
1046 } else {
1047 ret = -EIO;
1048 }
1049
1050 spin_unlock_bh(&ce->ce_lock);
1051
1052 return ret;
1053 }
1054
1055 int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
1056 void **per_transfer_contextp,
1057 dma_addr_t *bufferp)
1058 {
1059 return ce_state->ops->ce_revoke_recv_next(ce_state,
1060 per_transfer_contextp,
1061 bufferp);
1062 }
1063 EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
1064
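/*
 * Guts of the send-completion path: reap one source ring descriptor that the
 * hardware has finished with and return its per-transfer context.  Callers
 * handle locking (see ath10k_ce_completed_send_next()).
 */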
1069 static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
1070 void **per_transfer_contextp)
1071 {
1072 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1073 u32 ctrl_addr = ce_state->ctrl_addr;
1074 struct ath10k *ar = ce_state->ar;
1075 unsigned int nentries_mask = src_ring->nentries_mask;
1076 unsigned int sw_index = src_ring->sw_index;
1077 unsigned int read_index;
1078 struct ce_desc *desc;
1079
1080 if (src_ring->hw_index == sw_index) {
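/* The software index has caught up with the cached hardware read index, so
 * re-read the real index from the hardware to see whether more descriptors
 * have completed.  0xffffffff indicates the device is no longer reachable.
 */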
1089 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1090 if (read_index == 0xffffffff)
1091 return -ENODEV;
1092
1093 read_index &= nentries_mask;
1094 src_ring->hw_index = read_index;
1095 }
1096
1097 if (ar->hw_params.rri_on_ddr)
1098 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1099 else
1100 read_index = src_ring->hw_index;
1101
1102 if (read_index == sw_index)
1103 return -EIO;
1104
1105 if (per_transfer_contextp)
1106 *per_transfer_contextp =
1107 src_ring->per_transfer_context[sw_index];
1108
1109
1110 src_ring->per_transfer_context[sw_index] = NULL;
1111 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
1112 sw_index);
1113 desc->nbytes = 0;
1114
1115
1116 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1117 src_ring->sw_index = sw_index;
1118
1119 return 0;
1120 }
1121
1122 static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
1123 void **per_transfer_contextp)
1124 {
1125 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1126 u32 ctrl_addr = ce_state->ctrl_addr;
1127 struct ath10k *ar = ce_state->ar;
1128 unsigned int nentries_mask = src_ring->nentries_mask;
1129 unsigned int sw_index = src_ring->sw_index;
1130 unsigned int read_index;
1131 struct ce_desc_64 *desc;
1132
1133 if (src_ring->hw_index == sw_index) {
1142 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1143 if (read_index == 0xffffffff)
1144 return -ENODEV;
1145
1146 read_index &= nentries_mask;
1147 src_ring->hw_index = read_index;
1148 }
1149
1150 if (ar->hw_params.rri_on_ddr)
1151 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1152 else
1153 read_index = src_ring->hw_index;
1154
1155 if (read_index == sw_index)
1156 return -EIO;
1157
1158 if (per_transfer_contextp)
1159 *per_transfer_contextp =
1160 src_ring->per_transfer_context[sw_index];
1161
1162
1163 src_ring->per_transfer_context[sw_index] = NULL;
1164 desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
1165 sw_index);
1166 desc->nbytes = 0;
1167
1168
1169 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1170 src_ring->sw_index = sw_index;
1171
1172 return 0;
1173 }
1174
1175 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
1176 void **per_transfer_contextp)
1177 {
1178 return ce_state->ops->ce_completed_send_next_nolock(ce_state,
1179 per_transfer_contextp);
1180 }
1181 EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
1182
1183 static void ath10k_ce_extract_desc_data(struct ath10k *ar,
1184 struct ath10k_ce_ring *src_ring,
1185 u32 sw_index,
1186 dma_addr_t *bufferp,
1187 u32 *nbytesp,
1188 u32 *transfer_idp)
1189 {
1190 struct ce_desc *base = src_ring->base_addr_owner_space;
1191 struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
1192
1193
1194 *bufferp = __le32_to_cpu(desc->addr);
1195 *nbytesp = __le16_to_cpu(desc->nbytes);
1196 *transfer_idp = MS(__le16_to_cpu(desc->flags),
1197 CE_DESC_FLAGS_META_DATA);
1198 }
1199
1200 static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
1201 struct ath10k_ce_ring *src_ring,
1202 u32 sw_index,
1203 dma_addr_t *bufferp,
1204 u32 *nbytesp,
1205 u32 *transfer_idp)
1206 {
1207 struct ce_desc_64 *base = src_ring->base_addr_owner_space;
1208 struct ce_desc_64 *desc =
1209 CE_SRC_RING_TO_DESC_64(base, sw_index);
1210
1211
1212 *bufferp = __le64_to_cpu(desc->addr);
1213 *nbytesp = __le16_to_cpu(desc->nbytes);
1214 *transfer_idp = MS(__le16_to_cpu(desc->flags),
1215 CE_DESC_FLAGS_META_DATA);
1216 }
1217
1218
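/*
 * Reclaim a source ring descriptor that was posted but will not be reaped
 * through the normal completion path (e.g. when a pipe is being torn down),
 * returning its buffer address, length and transfer id to the caller.
 */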
1219 int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
1220 void **per_transfer_contextp,
1221 dma_addr_t *bufferp,
1222 unsigned int *nbytesp,
1223 unsigned int *transfer_idp)
1224 {
1225 struct ath10k_ce_ring *src_ring;
1226 unsigned int nentries_mask;
1227 unsigned int sw_index;
1228 unsigned int write_index;
1229 int ret;
1230 struct ath10k *ar;
1231 struct ath10k_ce *ce;
1232
1233 src_ring = ce_state->src_ring;
1234
1235 if (!src_ring)
1236 return -EIO;
1237
1238 ar = ce_state->ar;
1239 ce = ath10k_ce_priv(ar);
1240
1241 spin_lock_bh(&ce->ce_lock);
1242
1243 nentries_mask = src_ring->nentries_mask;
1244 sw_index = src_ring->sw_index;
1245 write_index = src_ring->write_index;
1246
1247 if (write_index != sw_index) {
1248 ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
1249 bufferp, nbytesp,
1250 transfer_idp);
1251
1252 if (per_transfer_contextp)
1253 *per_transfer_contextp =
1254 src_ring->per_transfer_context[sw_index];
1255
1256
1257 src_ring->per_transfer_context[sw_index] = NULL;
1258
1259
1260 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1261 src_ring->sw_index = sw_index;
1262 ret = 0;
1263 } else {
1264 ret = -EIO;
1265 }
1266
1267 spin_unlock_bh(&ce->ce_lock);
1268
1269 return ret;
1270 }
1271 EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
1272
1273 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
1274 void **per_transfer_contextp)
1275 {
1276 struct ath10k *ar = ce_state->ar;
1277 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1278 int ret;
1279
1280 spin_lock_bh(&ce->ce_lock);
1281 ret = ath10k_ce_completed_send_next_nolock(ce_state,
1282 per_transfer_contextp);
1283 spin_unlock_bh(&ce->ce_lock);
1284
1285 return ret;
1286 }
1287 EXPORT_SYMBOL(ath10k_ce_completed_send_next);
1288
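/*
 * Per-engine interrupt service: acknowledge this CE's copy-complete and
 * watermark interrupts, then run the registered receive and send completion
 * callbacks.
 */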
1295 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
1296 {
1297 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1298 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1299 struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
1300 u32 ctrl_addr = ce_state->ctrl_addr;
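/* Clear the copy-complete and watermark interrupt status before running the
 * callbacks so that completions arriving meanwhile re-assert the interrupt.
 */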
1312 ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
1313 wm_regs->cc_mask | wm_regs->wm_mask);
1314
1315 if (ce_state->recv_cb)
1316 ce_state->recv_cb(ce_state);
1317
1318 if (ce_state->send_cb)
1319 ce_state->send_cb(ce_state);
1320 }
1321 EXPORT_SYMBOL(ath10k_ce_per_engine_service);
1322
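/*
 * Service every copy engine that has an interrupt pending.  Used when all
 * CEs share a single host interrupt line.
 */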
1329 void ath10k_ce_per_engine_service_any(struct ath10k *ar)
1330 {
1331 int ce_id;
1332 u32 intr_summary;
1333
1334 intr_summary = ath10k_ce_interrupt_summary(ar);
1335
1336 for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
1337 if (intr_summary & (1 << ce_id))
1338 intr_summary &= ~(1 << ce_id);
1339 else
1340
1341 continue;
1342
1343 ath10k_ce_per_engine_service(ar, ce_id);
1344 }
1345 }
1346 EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
1347
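/*
 * Enable the copy-complete interrupt for a CE only if it has completion
 * callbacks registered and interrupts are not disabled by its attributes;
 * watermark interrupts are always left disabled.
 */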
1355 static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
1356 {
1357 u32 ctrl_addr = ce_state->ctrl_addr;
1358 struct ath10k *ar = ce_state->ar;
1359 bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
1360
1361 if ((!disable_copy_compl_intr) &&
1362 (ce_state->send_cb || ce_state->recv_cb))
1363 ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
1364 else
1365 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
1366
1367 ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
1368 }
1369
1370 void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
1371 {
1372 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1373 struct ath10k_ce_pipe *ce_state;
1374 u32 ctrl_addr;
1375
1376 ce_state = &ce->ce_states[ce_id];
1377 if (ce_state->attr_flags & CE_ATTR_POLL)
1378 return;
1379
1380 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1381
1382 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
1383 ath10k_ce_error_intr_disable(ar, ctrl_addr);
1384 ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
1385 }
1386 EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
1387
1388 void ath10k_ce_disable_interrupts(struct ath10k *ar)
1389 {
1390 int ce_id;
1391
1392 for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1393 ath10k_ce_disable_interrupt(ar, ce_id);
1394 }
1395 EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
1396
1397 void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
1398 {
1399 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1400 struct ath10k_ce_pipe *ce_state;
1401
1402 ce_state = &ce->ce_states[ce_id];
1403 if (ce_state->attr_flags & CE_ATTR_POLL)
1404 return;
1405
1406 ath10k_ce_per_engine_handler_adjust(ce_state);
1407 }
1408 EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
1409
1410 void ath10k_ce_enable_interrupts(struct ath10k *ar)
1411 {
1412 int ce_id;
1413
1417 for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1418 ath10k_ce_enable_interrupt(ar, ce_id);
1419 }
1420 EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
1421
1422 static int ath10k_ce_init_src_ring(struct ath10k *ar,
1423 unsigned int ce_id,
1424 const struct ce_attr *attr)
1425 {
1426 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1427 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1428 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1429 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1430
1431 nentries = roundup_pow_of_two(attr->src_nentries);
1432
1433 if (ar->hw_params.target_64bit)
1434 memset(src_ring->base_addr_owner_space, 0,
1435 nentries * sizeof(struct ce_desc_64));
1436 else
1437 memset(src_ring->base_addr_owner_space, 0,
1438 nentries * sizeof(struct ce_desc));
1439
1440 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1441 src_ring->sw_index &= src_ring->nentries_mask;
1442 src_ring->hw_index = src_ring->sw_index;
1443
1444 src_ring->write_index =
1445 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
1446 src_ring->write_index &= src_ring->nentries_mask;
1447
1448 ath10k_ce_src_ring_base_addr_set(ar, ce_id,
1449 src_ring->base_addr_ce_space);
1450 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
1451 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
1452 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
1453 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
1454 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
1455
1456 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1457 "boot init ce src ring id %d entries %d base_addr %pK\n",
1458 ce_id, nentries, src_ring->base_addr_owner_space);
1459
1460 return 0;
1461 }
1462
1463 static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1464 unsigned int ce_id,
1465 const struct ce_attr *attr)
1466 {
1467 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1468 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1469 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
1470 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1471
1472 nentries = roundup_pow_of_two(attr->dest_nentries);
1473
1474 if (ar->hw_params.target_64bit)
1475 memset(dest_ring->base_addr_owner_space, 0,
1476 nentries * sizeof(struct ce_desc_64));
1477 else
1478 memset(dest_ring->base_addr_owner_space, 0,
1479 nentries * sizeof(struct ce_desc));
1480
1481 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
1482 dest_ring->sw_index &= dest_ring->nentries_mask;
1483 dest_ring->write_index =
1484 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
1485 dest_ring->write_index &= dest_ring->nentries_mask;
1486
1487 ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
1488 dest_ring->base_addr_ce_space);
1489 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
1490 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
1491 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
1492 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
1493
1494 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1495 "boot ce dest ring id %d entries %d base_addr %pK\n",
1496 ce_id, nentries, dest_ring->base_addr_owner_space);
1497
1498 return 0;
1499 }
1500
1501 static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
1502 struct ath10k_ce_ring *src_ring,
1503 u32 nentries)
1504 {
1505 src_ring->shadow_base_unaligned = kcalloc(nentries,
1506 sizeof(struct ce_desc_64),
1507 GFP_KERNEL);
1508 if (!src_ring->shadow_base_unaligned)
1509 return -ENOMEM;
1510
1511 src_ring->shadow_base = (struct ce_desc_64 *)
1512 PTR_ALIGN(src_ring->shadow_base_unaligned,
1513 CE_DESC_RING_ALIGN);
1514 return 0;
1515 }
1516
1517 static struct ath10k_ce_ring *
1518 ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
1519 const struct ce_attr *attr)
1520 {
1521 struct ath10k_ce_ring *src_ring;
1522 u32 nentries = attr->src_nentries;
1523 dma_addr_t base_addr;
1524 int ret;
1525
1526 nentries = roundup_pow_of_two(nentries);
1527
1528 src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
1529 nentries), GFP_KERNEL);
1530 if (src_ring == NULL)
1531 return ERR_PTR(-ENOMEM);
1532
1533 src_ring->nentries = nentries;
1534 src_ring->nentries_mask = nentries - 1;
1535
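/* One extra CE_DESC_RING_ALIGN's worth of space is allocated so the ring
 * base address can be aligned below without overrunning the allocation.
 */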
1540 src_ring->base_addr_owner_space_unaligned =
1541 dma_alloc_coherent(ar->dev,
1542 (nentries * sizeof(struct ce_desc) +
1543 CE_DESC_RING_ALIGN),
1544 &base_addr, GFP_KERNEL);
1545 if (!src_ring->base_addr_owner_space_unaligned) {
1546 kfree(src_ring);
1547 return ERR_PTR(-ENOMEM);
1548 }
1549
1550 src_ring->base_addr_ce_space_unaligned = base_addr;
1551
1552 src_ring->base_addr_owner_space =
1553 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1554 CE_DESC_RING_ALIGN);
1555 src_ring->base_addr_ce_space =
1556 ALIGN(src_ring->base_addr_ce_space_unaligned,
1557 CE_DESC_RING_ALIGN);
1558
1559 if (ar->hw_params.shadow_reg_support) {
1560 ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
1561 if (ret) {
1562 dma_free_coherent(ar->dev,
1563 (nentries * sizeof(struct ce_desc) +
1564 CE_DESC_RING_ALIGN),
1565 src_ring->base_addr_owner_space_unaligned,
1566 base_addr);
1567 kfree(src_ring);
1568 return ERR_PTR(ret);
1569 }
1570 }
1571
1572 return src_ring;
1573 }
1574
1575 static struct ath10k_ce_ring *
1576 ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
1577 const struct ce_attr *attr)
1578 {
1579 struct ath10k_ce_ring *src_ring;
1580 u32 nentries = attr->src_nentries;
1581 dma_addr_t base_addr;
1582 int ret;
1583
1584 nentries = roundup_pow_of_two(nentries);
1585
1586 src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
1587 nentries), GFP_KERNEL);
1588 if (!src_ring)
1589 return ERR_PTR(-ENOMEM);
1590
1591 src_ring->nentries = nentries;
1592 src_ring->nentries_mask = nentries - 1;
1593
1597 src_ring->base_addr_owner_space_unaligned =
1598 dma_alloc_coherent(ar->dev,
1599 (nentries * sizeof(struct ce_desc_64) +
1600 CE_DESC_RING_ALIGN),
1601 &base_addr, GFP_KERNEL);
1602 if (!src_ring->base_addr_owner_space_unaligned) {
1603 kfree(src_ring);
1604 return ERR_PTR(-ENOMEM);
1605 }
1606
1607 src_ring->base_addr_ce_space_unaligned = base_addr;
1608
1609 src_ring->base_addr_owner_space =
1610 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1611 CE_DESC_RING_ALIGN);
1612 src_ring->base_addr_ce_space =
1613 ALIGN(src_ring->base_addr_ce_space_unaligned,
1614 CE_DESC_RING_ALIGN);
1615
1616 if (ar->hw_params.shadow_reg_support) {
1617 ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
1618 if (ret) {
1619 dma_free_coherent(ar->dev,
1620 (nentries * sizeof(struct ce_desc_64) +
1621 CE_DESC_RING_ALIGN),
1622 src_ring->base_addr_owner_space_unaligned,
1623 base_addr);
1624 kfree(src_ring);
1625 return ERR_PTR(ret);
1626 }
1627 }
1628
1629 return src_ring;
1630 }
1631
1632 static struct ath10k_ce_ring *
1633 ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
1634 const struct ce_attr *attr)
1635 {
1636 struct ath10k_ce_ring *dest_ring;
1637 u32 nentries;
1638 dma_addr_t base_addr;
1639
1640 nentries = roundup_pow_of_two(attr->dest_nentries);
1641
1642 dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
1643 nentries), GFP_KERNEL);
1644 if (dest_ring == NULL)
1645 return ERR_PTR(-ENOMEM);
1646
1647 dest_ring->nentries = nentries;
1648 dest_ring->nentries_mask = nentries - 1;
1649
1654 dest_ring->base_addr_owner_space_unaligned =
1655 dma_alloc_coherent(ar->dev,
1656 (nentries * sizeof(struct ce_desc) +
1657 CE_DESC_RING_ALIGN),
1658 &base_addr, GFP_KERNEL);
1659 if (!dest_ring->base_addr_owner_space_unaligned) {
1660 kfree(dest_ring);
1661 return ERR_PTR(-ENOMEM);
1662 }
1663
1664 dest_ring->base_addr_ce_space_unaligned = base_addr;
1665
1666 dest_ring->base_addr_owner_space =
1667 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1668 CE_DESC_RING_ALIGN);
1669 dest_ring->base_addr_ce_space =
1670 ALIGN(dest_ring->base_addr_ce_space_unaligned,
1671 CE_DESC_RING_ALIGN);
1672
1673 return dest_ring;
1674 }
1675
1676 static struct ath10k_ce_ring *
1677 ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
1678 const struct ce_attr *attr)
1679 {
1680 struct ath10k_ce_ring *dest_ring;
1681 u32 nentries;
1682 dma_addr_t base_addr;
1683
1684 nentries = roundup_pow_of_two(attr->dest_nentries);
1685
1686 dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
1687 nentries), GFP_KERNEL);
1688 if (!dest_ring)
1689 return ERR_PTR(-ENOMEM);
1690
1691 dest_ring->nentries = nentries;
1692 dest_ring->nentries_mask = nentries - 1;
1693
1697 dest_ring->base_addr_owner_space_unaligned =
1698 dma_alloc_coherent(ar->dev,
1699 (nentries * sizeof(struct ce_desc_64) +
1700 CE_DESC_RING_ALIGN),
1701 &base_addr, GFP_KERNEL);
1702 if (!dest_ring->base_addr_owner_space_unaligned) {
1703 kfree(dest_ring);
1704 return ERR_PTR(-ENOMEM);
1705 }
1706
1707 dest_ring->base_addr_ce_space_unaligned = base_addr;
1708
1709
1712 dest_ring->base_addr_owner_space =
1713 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1714 CE_DESC_RING_ALIGN);
1715 dest_ring->base_addr_ce_space =
1716 ALIGN(dest_ring->base_addr_ce_space_unaligned,
1717 CE_DESC_RING_ALIGN);
1718
1719 return dest_ring;
1720 }
1721
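/*
 * Initialize a copy engine from caller-supplied attributes: program the
 * source and/or destination ring registers for rings previously allocated
 * with ath10k_ce_alloc_pipe().
 */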
1729 int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
1730 const struct ce_attr *attr)
1731 {
1732 int ret;
1733
1734 if (attr->src_nentries) {
1735 ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
1736 if (ret) {
1737 ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
1738 ce_id, ret);
1739 return ret;
1740 }
1741 }
1742
1743 if (attr->dest_nentries) {
1744 ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
1745 if (ret) {
1746 ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
1747 ce_id, ret);
1748 return ret;
1749 }
1750 }
1751
1752 return 0;
1753 }
1754 EXPORT_SYMBOL(ath10k_ce_init_pipe);
1755
1756 static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
1757 {
1758 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1759
1760 ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
1761 ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
1762 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
1763 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
1764 }
1765
1766 static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
1767 {
1768 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1769
1770 ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
1771 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
1772 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
1773 }
1774
1775 void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
1776 {
1777 ath10k_ce_deinit_src_ring(ar, ce_id);
1778 ath10k_ce_deinit_dest_ring(ar, ce_id);
1779 }
1780 EXPORT_SYMBOL(ath10k_ce_deinit_pipe);
1781
1782 static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1783 {
1784 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1785 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1786
1787 if (ce_state->src_ring) {
1788 if (ar->hw_params.shadow_reg_support)
1789 kfree(ce_state->src_ring->shadow_base_unaligned);
1790 dma_free_coherent(ar->dev,
1791 (ce_state->src_ring->nentries *
1792 sizeof(struct ce_desc) +
1793 CE_DESC_RING_ALIGN),
1794 ce_state->src_ring->base_addr_owner_space,
1795 ce_state->src_ring->base_addr_ce_space);
1796 kfree(ce_state->src_ring);
1797 }
1798
1799 if (ce_state->dest_ring) {
1800 dma_free_coherent(ar->dev,
1801 (ce_state->dest_ring->nentries *
1802 sizeof(struct ce_desc) +
1803 CE_DESC_RING_ALIGN),
1804 ce_state->dest_ring->base_addr_owner_space,
1805 ce_state->dest_ring->base_addr_ce_space);
1806 kfree(ce_state->dest_ring);
1807 }
1808
1809 ce_state->src_ring = NULL;
1810 ce_state->dest_ring = NULL;
1811 }
1812
1813 static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
1814 {
1815 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1816 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1817
1818 if (ce_state->src_ring) {
1819 if (ar->hw_params.shadow_reg_support)
1820 kfree(ce_state->src_ring->shadow_base_unaligned);
1821 dma_free_coherent(ar->dev,
1822 (ce_state->src_ring->nentries *
1823 sizeof(struct ce_desc_64) +
1824 CE_DESC_RING_ALIGN),
1825 ce_state->src_ring->base_addr_owner_space,
1826 ce_state->src_ring->base_addr_ce_space);
1827 kfree(ce_state->src_ring);
1828 }
1829
1830 if (ce_state->dest_ring) {
1831 dma_free_coherent(ar->dev,
1832 (ce_state->dest_ring->nentries *
1833 sizeof(struct ce_desc_64) +
1834 CE_DESC_RING_ALIGN),
1835 ce_state->dest_ring->base_addr_owner_space,
1836 ce_state->dest_ring->base_addr_ce_space);
1837 kfree(ce_state->dest_ring);
1838 }
1839
1840 ce_state->src_ring = NULL;
1841 ce_state->dest_ring = NULL;
1842 }
1843
1844 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1845 {
1846 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1847 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1848
1849 ce_state->ops->ce_free_pipe(ar, ce_id);
1850 }
1851 EXPORT_SYMBOL(ath10k_ce_free_pipe);
1852
1853 void ath10k_ce_dump_registers(struct ath10k *ar,
1854 struct ath10k_fw_crash_data *crash_data)
1855 {
1856 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1857 struct ath10k_ce_crash_data ce_data;
1858 u32 addr, id;
1859
1860 lockdep_assert_held(&ar->dump_mutex);
1861
1862 ath10k_err(ar, "Copy Engine register dump:\n");
1863
1864 spin_lock_bh(&ce->ce_lock);
1865 for (id = 0; id < CE_COUNT; id++) {
1866 addr = ath10k_ce_base_address(ar, id);
1867 ce_data.base_addr = cpu_to_le32(addr);
1868
1869 ce_data.src_wr_idx =
1870 cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
1871 ce_data.src_r_idx =
1872 cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
1873 ce_data.dst_wr_idx =
1874 cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
1875 ce_data.dst_r_idx =
1876 cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
1877
1878 if (crash_data)
1879 crash_data->ce_crash_data[id] = ce_data;
1880
1881 ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
1882 le32_to_cpu(ce_data.base_addr),
1883 le32_to_cpu(ce_data.src_wr_idx),
1884 le32_to_cpu(ce_data.src_r_idx),
1885 le32_to_cpu(ce_data.dst_wr_idx),
1886 le32_to_cpu(ce_data.dst_r_idx));
1887 }
1888
1889 spin_unlock_bh(&ce->ce_lock);
1890 }
1891 EXPORT_SYMBOL(ath10k_ce_dump_registers);
1892
1893 static const struct ath10k_ce_ops ce_ops = {
1894 .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
1895 .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
1896 .ce_rx_post_buf = __ath10k_ce_rx_post_buf,
1897 .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
1898 .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
1899 .ce_extract_desc_data = ath10k_ce_extract_desc_data,
1900 .ce_free_pipe = _ath10k_ce_free_pipe,
1901 .ce_send_nolock = _ath10k_ce_send_nolock,
1902 .ce_set_src_ring_base_addr_hi = NULL,
1903 .ce_set_dest_ring_base_addr_hi = NULL,
1904 .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
1905 };
1906
1907 static const struct ath10k_ce_ops ce_64_ops = {
1908 .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
1909 .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
1910 .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
1911 .ce_completed_recv_next_nolock =
1912 _ath10k_ce_completed_recv_next_nolock_64,
1913 .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
1914 .ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
1915 .ce_free_pipe = _ath10k_ce_free_pipe_64,
1916 .ce_send_nolock = _ath10k_ce_send_nolock_64,
1917 .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
1918 .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
1919 .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
1920 };
1921
1922 static void ath10k_ce_set_ops(struct ath10k *ar,
1923 struct ath10k_ce_pipe *ce_state)
1924 {
1925 switch (ar->hw_rev) {
1926 case ATH10K_HW_WCN3990:
1927 ce_state->ops = &ce_64_ops;
1928 break;
1929 default:
1930 ce_state->ops = &ce_ops;
1931 break;
1932 }
1933 }
1934
1935 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
1936 const struct ce_attr *attr)
1937 {
1938 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1939 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1940 int ret;
1941
1942 ath10k_ce_set_ops(ar, ce_state);
1943
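/* Compile-time checks that the HTT host-to-target CE ring has room for two
 * descriptors per in-flight MSDU, plus one spare slot, for every supported
 * firmware configuration.
 */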
1948 BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
1949 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1950 BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
1951 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1952 BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
1953 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1954
1955 ce_state->ar = ar;
1956 ce_state->id = ce_id;
1957 ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1958 ce_state->attr_flags = attr->flags;
1959 ce_state->src_sz_max = attr->src_sz_max;
1960
1961 if (attr->src_nentries)
1962 ce_state->send_cb = attr->send_cb;
1963
1964 if (attr->dest_nentries)
1965 ce_state->recv_cb = attr->recv_cb;
1966
1967 if (attr->src_nentries) {
1968 ce_state->src_ring =
1969 ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
1970 if (IS_ERR(ce_state->src_ring)) {
1971 ret = PTR_ERR(ce_state->src_ring);
1972 ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
1973 ce_id, ret);
1974 ce_state->src_ring = NULL;
1975 return ret;
1976 }
1977 }
1978
1979 if (attr->dest_nentries) {
1980 ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
1981 ce_id,
1982 attr);
1983 if (IS_ERR(ce_state->dest_ring)) {
1984 ret = PTR_ERR(ce_state->dest_ring);
1985 ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
1986 ce_id, ret);
1987 ce_state->dest_ring = NULL;
1988 return ret;
1989 }
1990 }
1991
1992 return 0;
1993 }
1994 EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
1995
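/*
 * Allocate the DDR-resident copy of the per-CE read indices (RRI), point the
 * hardware at it and set the index-update bit in each CE's CTRL1 register so
 * the target maintains the read indices in host memory.
 */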
1996 void ath10k_ce_alloc_rri(struct ath10k *ar)
1997 {
1998 int i;
1999 u32 value;
2000 u32 ctrl1_regs;
2001 u32 ce_base_addr;
2002 struct ath10k_ce *ce = ath10k_ce_priv(ar);
2003
2004 ce->vaddr_rri = dma_alloc_coherent(ar->dev,
2005 (CE_COUNT * sizeof(u32)),
2006 &ce->paddr_rri, GFP_KERNEL);
2007
2008 if (!ce->vaddr_rri)
2009 return;
2010
2011 ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
2012 lower_32_bits(ce->paddr_rri));
2013 ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
2014 (upper_32_bits(ce->paddr_rri) &
2015 CE_DESC_ADDR_HI_MASK));
2016
2017 for (i = 0; i < CE_COUNT; i++) {
2018 ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
2019 ce_base_addr = ath10k_ce_base_address(ar, i);
2020 value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
2021 value |= ar->hw_ce_regs->upd->mask;
2022 ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
2023 }
2024 }
2025 EXPORT_SYMBOL(ath10k_ce_alloc_rri);
2026
2027 void ath10k_ce_free_rri(struct ath10k *ar)
2028 {
2029 struct ath10k_ce *ce = ath10k_ce_priv(ar);
2030
2031 dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
2032 ce->vaddr_rri,
2033 ce->paddr_rri);
2034 }
2035 EXPORT_SYMBOL(ath10k_ce_free_rri);