#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_pci.h"

#define DPMAIF_BAT_COUNT		8192
#define DPMAIF_FRG_COUNT		4814
#define DPMAIF_PIT_COUNT		(DPMAIF_BAT_COUNT * 2)

#define DPMAIF_BAT_CNT_THRESHOLD	30
#define DPMAIF_PIT_CNT_THRESHOLD	60
#define DPMAIF_RX_PUSH_THRESHOLD_MASK	GENMASK(2, 0)
#define DPMAIF_NOTIFY_RELEASE_COUNT	128
#define DPMAIF_POLL_PIT_TIME_US		20
#define DPMAIF_POLL_PIT_MAX_TIME_US	2000
#define DPMAIF_WQ_TIME_LIMIT_MS		2
#define DPMAIF_CS_RESULT_PASS		0

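/* PIT packet type */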
#define DES_PT_PD			0
#define DES_PT_MSG			1

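/* PIT buffer type */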
#define PKT_BUF_FRAG			1

static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
{
	u32 value;

	value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
	value <<= 13;
	value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
	return value;
}

static int t7xx_dpmaif_net_rx_push_thread(void *arg)
{
	struct dpmaif_rx_queue *q = arg;
	struct dpmaif_ctrl *hif_ctrl;
	struct dpmaif_callbacks *cb;

	hif_ctrl = q->dpmaif_ctrl;
	cb = hif_ctrl->callbacks;

	while (!kthread_should_stop()) {
		struct sk_buff *skb;
		unsigned long flags;

		if (skb_queue_empty(&q->skb_list)) {
			if (wait_event_interruptible(q->rx_wq,
						     !skb_queue_empty(&q->skb_list) ||
						     kthread_should_stop()))
				continue;

			if (kthread_should_stop())
				break;
		}

		spin_lock_irqsave(&q->skb_list.lock, flags);
		skb = __skb_dequeue(&q->skb_list);
		spin_unlock_irqrestore(&q->skb_list.lock, flags);

		if (!skb)
			continue;

		cb->recv_skb(hif_ctrl->t7xx_dev, skb);
		cond_resched();
	}

	return 0;
}

static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
					 const unsigned int q_num, const unsigned int bat_cnt)
{
	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
	struct dpmaif_bat_request *bat_req = rxq->bat_req;
	unsigned int old_rl_idx, new_wr_idx, old_wr_idx;

	if (!rxq->que_started) {
		dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
		return -EINVAL;
	}

	old_rl_idx = bat_req->bat_release_rd_idx;
	old_wr_idx = bat_req->bat_wr_idx;
	new_wr_idx = old_wr_idx + bat_cnt;

	if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
		goto err_flow;

	if (new_wr_idx >= bat_req->bat_size_cnt) {
		new_wr_idx -= bat_req->bat_size_cnt;
		if (new_wr_idx >= old_rl_idx)
			goto err_flow;
	}

	bat_req->bat_wr_idx = new_wr_idx;
	return 0;

err_flow:
	dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
	return -EINVAL;
}

static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
					const unsigned int size, struct dpmaif_bat_skb *cur_skb)
{
	dma_addr_t data_bus_addr;
	struct sk_buff *skb;

	skb = __dev_alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return false;

	data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
		dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
		dev_kfree_skb_any(skb);
		return false;
	}

	cur_skb->skb = skb;
	cur_skb->data_bus_addr = data_bus_addr;
	cur_skb->data_len = size;

	return true;
}

static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
			       unsigned int index)
{
	struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;

	if (bat_skb->skb) {
		dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
		dev_kfree_skb(bat_skb->skb);
		bat_skb->skb = NULL;
	}
}

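/**
 * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the packet BAT ring.
 * @dpmaif_ctrl: DPMAIF HIF control structure.
 * @bat_req: BAT request structure for the normal (packet) BAT ring.
 * @q_num: RX queue number.
 * @buf_cnt: Number of buffers to allocate.
 * @initial: True when populating the ring for the first time.
 *
 * Allocate and DMA-map socket buffers, store their addresses in the BAT ring
 * and advance the software write index. When not the initial fill, also notify
 * the hardware of the newly added entries.
 *
 * Return: 0 on success, or a negative errno on failure.
 */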
int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
			     const struct dpmaif_bat_request *bat_req,
			     const unsigned int q_num, const unsigned int buf_cnt,
			     const bool initial)
{
	unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
	int ret;

	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
		return -EINVAL;

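	/* Check BAT buffer space */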
	bat_max_cnt = bat_req->bat_size_cnt;

	bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
					    bat_req->bat_wr_idx, DPMAIF_WRITE);
	if (buf_cnt > bat_cnt)
		return -ENOMEM;

	bat_start_idx = bat_req->bat_wr_idx;

	for (i = 0; i < buf_cnt; i++) {
		unsigned int cur_bat_idx = bat_start_idx + i;
		struct dpmaif_bat_skb *cur_skb;
		struct dpmaif_bat *cur_bat;

		if (cur_bat_idx >= bat_max_cnt)
			cur_bat_idx -= bat_max_cnt;

		cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
		if (!cur_skb->skb &&
		    !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
			break;

		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
		cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
		cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
	}

	if (!i)
		return -ENOMEM;

	ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
	if (ret)
		goto err_unmap_skbs;

	if (!initial) {
		unsigned int hw_wr_idx;

		ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
		if (ret)
			goto err_unmap_skbs;

		hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
							  DPF_RX_QNO_DFT);
		if (hw_wr_idx != bat_req->bat_wr_idx) {
			ret = -EFAULT;
			dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
			goto err_unmap_skbs;
		}
	}

	return 0;

err_unmap_skbs:
	while (i--)
		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);

	return ret;
}

static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
					  const unsigned int rel_entry_num)
{
	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
	unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
	int ret;

	if (!rxq->que_started)
		return 0;

	if (rel_entry_num >= rxq->pit_size_cnt) {
		dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
		return -EINVAL;
	}

	old_rel_idx = rxq->pit_release_rd_idx;
	new_rel_idx = old_rel_idx + rel_entry_num;
	hw_wr_idx = rxq->pit_wr_idx;
	if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
		new_rel_idx -= rxq->pit_size_cnt;

	ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
	if (ret) {
		dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
		return ret;
	}

	rxq->pit_release_rd_idx = new_rel_idx;
	return 0;
}

static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&bat_req->mask_lock, flags);
	set_bit(idx, bat_req->bat_bitmap);
	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
}

static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
				       const unsigned int cur_bid)
{
	struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
	struct dpmaif_bat_page *bat_page;

	if (cur_bid >= DPMAIF_FRG_COUNT)
		return -EINVAL;

	bat_page = bat_frag->bat_skb + cur_bid;
	if (!bat_page->page)
		return -EINVAL;

	return 0;
}

static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
				unsigned int index)
{
	struct dpmaif_bat_page *bat_page = bat_page_base + index;

	if (bat_page->page) {
		dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
		put_page(bat_page->page);
		bat_page->page = NULL;
	}
}

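/**
 * t7xx_dpmaif_rx_frag_alloc() - Allocate page fragments for the fragment BAT ring.
 * @dpmaif_ctrl: DPMAIF HIF control structure.
 * @bat_req: BAT request structure for the fragment BAT ring.
 * @buf_cnt: Number of fragment buffers to allocate.
 * @initial: True when populating the ring for the first time.
 *
 * Allocate and DMA-map page fragments, store their addresses in the fragment
 * BAT ring and advance the software write index. When not the initial fill,
 * also notify the hardware of the newly added entries.
 *
 * Return: 0 on success, or a negative errno on failure.
 */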
int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
			      const unsigned int buf_cnt, const bool initial)
{
	unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
	struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
	int ret = 0, i;

	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
		return -EINVAL;

	buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
					      bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
					      DPMAIF_WRITE);
	if (buf_cnt > buf_space) {
		dev_err(dpmaif_ctrl->dev,
			"Requested more buffers than the space available in RX frag ring\n");
		return -EINVAL;
	}

	for (i = 0; i < buf_cnt; i++) {
		struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
		struct dpmaif_bat *cur_bat;
		dma_addr_t data_base_addr;

		if (!cur_page->page) {
			unsigned long offset;
			struct page *page;
			void *data;

			data = netdev_alloc_frag(bat_req->pkt_buf_sz);
			if (!data)
				break;

			page = virt_to_head_page(data);
			offset = data - page_address(page);

			data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
						      bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
			if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
				put_page(virt_to_head_page(data));
				dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
				break;
			}

			cur_page->page = page;
			cur_page->data_bus_addr = data_base_addr;
			cur_page->offset = offset;
			cur_page->data_len = bat_req->pkt_buf_sz;
		}

		data_base_addr = cur_page->data_bus_addr;
		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
		cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
		cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
		cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
	}

	bat_req->bat_wr_idx = cur_bat_idx;

	if (!initial)
		t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);

	if (i < buf_cnt) {
		ret = -ENOMEM;
		if (initial) {
			while (i--)
				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
		}
	}

	return ret;
}

static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
				       const struct dpmaif_pit *pkt_info,
				       struct sk_buff *skb)
{
	unsigned long long data_bus_addr, data_base_addr;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_bat_page *page_info;
	unsigned int data_len;
	int data_offset;

	page_info = rxq->bat_frag->bat_skb;
	page_info += t7xx_normal_pit_bid(pkt_info);
	dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);

	if (!page_info->page)
		return -EINVAL;

	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
	data_base_addr = page_info->data_bus_addr;
	data_offset = data_bus_addr - data_base_addr;
	data_offset += page_info->offset;
	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
			data_offset, data_len, page_info->data_len);

	page_info->page = NULL;
	page_info->offset = 0;
	page_info->data_len = 0;
	return 0;
}

static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
				const struct dpmaif_pit *pkt_info,
				const struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
	int ret;

	ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
	if (ret < 0)
		return ret;

	ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
	if (ret < 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
		return ret;
	}

	t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
	return 0;
}

static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
{
	struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;

	bat_skb += cur_bid;
	if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
		return -EINVAL;

	return 0;
}

static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
{
	return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
}

static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
				     const struct dpmaif_pit *pit)
{
	unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;

	if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
				     cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
				     DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
		return -EFAULT;

	rxq->expect_pit_seq++;
	if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
		rxq->expect_pit_seq = 0;

	return 0;
}

static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
{
	unsigned int zero_index;
	unsigned long flags;

	spin_lock_irqsave(&bat_req->mask_lock, flags);

	zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
					bat_req->bat_release_rd_idx);

	if (zero_index < bat_req->bat_size_cnt) {
		spin_unlock_irqrestore(&bat_req->mask_lock, flags);
		return zero_index - bat_req->bat_release_rd_idx;
	}

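	/* Wrap around: search again from the start of the bitmap up to the release index */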
	zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
	return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
}

static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
					 const unsigned int rel_entry_num,
					 const enum bat_type buf_type)
{
	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
	unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
	struct dpmaif_bat_request *bat;
	unsigned long flags;

	if (!rxq->que_started || !rel_entry_num)
		return -EINVAL;

	if (buf_type == BAT_TYPE_FRAG) {
		bat = rxq->bat_frag;
		hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
	} else {
		bat = rxq->bat_req;
		hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
	}

	if (rel_entry_num >= bat->bat_size_cnt)
		return -EINVAL;

	old_rel_idx = bat->bat_release_rd_idx;
	new_rel_idx = old_rel_idx + rel_entry_num;

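	/* Nothing to release when the ring is empty */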
	if (bat->bat_wr_idx == old_rel_idx)
		return 0;

	if (hw_rd_idx >= old_rel_idx) {
		if (new_rel_idx > hw_rd_idx)
			return -EINVAL;
	} else if (new_rel_idx >= bat->bat_size_cnt) {
		new_rel_idx -= bat->bat_size_cnt;
		if (new_rel_idx > hw_rd_idx)
			return -EINVAL;
	}

	spin_lock_irqsave(&bat->mask_lock, flags);
	for (i = 0; i < rel_entry_num; i++) {
		unsigned int index = bat->bat_release_rd_idx + i;

		if (index >= bat->bat_size_cnt)
			index -= bat->bat_size_cnt;

		clear_bit(index, bat->bat_bitmap);
	}
	spin_unlock_irqrestore(&bat->mask_lock, flags);

	bat->bat_release_rd_idx = new_rel_idx;
	return rel_entry_num;
}

static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
{
	int ret;

	if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
	if (ret)
		return ret;

	rxq->pit_remain_release_cnt = 0;
	return 0;
}

static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
{
	unsigned int bid_cnt;
	int ret;

	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
	if (ret <= 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
		return ret;
	}

	ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
	if (ret < 0)
		dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);

	return ret;
}

static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
{
	unsigned int bid_cnt;
	int ret;

	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
	if (ret <= 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
		return ret;
	}

	return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
}

static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
				      const struct dpmaif_pit *msg_pit,
				      struct dpmaif_cur_rx_skb_info *skb_info)
{
	int header = le32_to_cpu(msg_pit->header);

	skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
	skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
	skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
	skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
}

static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
				       const struct dpmaif_pit *pkt_info,
				       struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned long long data_bus_addr, data_base_addr;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_bat_skb *bat_skb;
	unsigned int data_len;
	struct sk_buff *skb;
	int data_offset;

	bat_skb = rxq->bat_req->bat_skb;
	bat_skb += t7xx_normal_pit_bid(pkt_info);
	dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);

	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
	data_base_addr = bat_skb->data_bus_addr;
	data_offset = data_bus_addr - data_base_addr;
	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
	skb = bat_skb->skb;
	skb->len = 0;
	skb_reset_tail_pointer(skb);
	skb_reserve(skb, data_offset);

	if (skb->tail + data_len > skb->end) {
		dev_err(dev, "No buffer space available\n");
		return -ENOBUFS;
	}

	skb_put(skb, data_len);
	skb_info->cur_skb = skb;
	bat_skb->skb = NULL;
	return 0;
}

static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
				  const struct dpmaif_pit *pkt_info,
				  struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
	int ret;

	ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
	if (ret < 0)
		return ret;

	ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
	if (ret < 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
		return ret;
	}

	t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
	return 0;
}

static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
{
	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
	int ret;

	queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);

	ret = t7xx_dpmaif_pit_release_and_add(rxq);
	if (ret < 0)
		dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);

	return ret;
}

static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&rxq->skb_list.lock, flags);
	if (rxq->skb_list.qlen < rxq->skb_list_max_len)
		__skb_queue_tail(&rxq->skb_list, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&rxq->skb_list.lock, flags);
}

static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
			       struct dpmaif_cur_rx_skb_info *skb_info)
{
	struct sk_buff *skb = skb_info->cur_skb;
	struct t7xx_skb_cb *skb_cb;
	u8 netif_id;

	skb_info->cur_skb = NULL;

	if (skb_info->pit_dp) {
		dev_kfree_skb_any(skb);
		return;
	}

	skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
									CHECKSUM_NONE;
	netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
	skb_cb = T7XX_SKB_CB(skb);
	skb_cb->netif_idx = netif_id;
	skb_cb->rx_pkt_type = skb_info->pkt_type;
	t7xx_dpmaif_rx_skb_enqueue(rxq, skb);
}

static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
				const unsigned long timeout)
{
	unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_cur_rx_skb_info *skb_info;
	int ret = 0;

	pit_len = rxq->pit_size_cnt;
	skb_info = &rxq->rx_data_info;
	cur_pit = rxq->pit_rd_idx;

	for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
		struct dpmaif_pit *pkt_info;
		u32 val;

		if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout))
			break;

		pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
		if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
			dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
			return -EAGAIN;
		}

		val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
		if (val == DES_PT_MSG) {
			if (skb_info->msg_pit_received)
				dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);

			skb_info->msg_pit_received = true;
			t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
		} else {
			val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
			if (val != PKT_BUF_FRAG)
				ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
			else if (!skb_info->cur_skb)
				ret = -EINVAL;
			else
				ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);

			if (ret < 0) {
				skb_info->err_payload = 1;
				dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
			}

			val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
			if (!val) {
				if (!skb_info->err_payload) {
					t7xx_dpmaif_rx_skb(rxq, skb_info);
				} else if (skb_info->cur_skb) {
					dev_kfree_skb_any(skb_info->cur_skb);
					skb_info->cur_skb = NULL;
				}

				memset(skb_info, 0, sizeof(*skb_info));

				recv_skb_cnt++;
				if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) {
					wake_up_all(&rxq->rx_wq);
					recv_skb_cnt = 0;
				}
			}
		}

		cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
		rxq->pit_rd_idx = cur_pit;
		rxq->pit_remain_release_cnt++;

		if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
			ret = t7xx_dpmaifq_rx_notify_hw(rxq);
			if (ret < 0)
				break;
		}
	}

	if (recv_skb_cnt)
		wake_up_all(&rxq->rx_wq);

	if (!ret)
		ret = t7xx_dpmaifq_rx_notify_hw(rxq);

	if (ret)
		return ret;

	return rx_cnt;
}

static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
{
	unsigned int hw_wr_idx, pit_cnt;

	if (!rxq->que_started)
		return 0;

	hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
	pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
					    DPMAIF_READ);
	rxq->pit_wr_idx = hw_wr_idx;
	return pit_cnt;
}

static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
				       const unsigned int q_num, const unsigned int budget)
{
	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
	unsigned long time_limit;
	unsigned int cnt;

	time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS);

	while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) {
		unsigned int rd_cnt;
		int real_cnt;

		rd_cnt = min(cnt, budget);

		real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit);
		if (real_cnt < 0)
			return real_cnt;

		if (real_cnt < cnt)
			return -EAGAIN;
	}

	return 0;
}

static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq)
{
	struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
	int ret;

	ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget);
	if (ret < 0) {
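		/* Collection failed, so retry from the workqueue; the RX done interrupt stays masked */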
		queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
		t7xx_dpmaif_clr_ip_busy_sts(hw_info);
	} else {
		t7xx_dpmaif_clr_ip_busy_sts(hw_info);
		t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index);
	}
}

static void t7xx_dpmaif_rxq_work(struct work_struct *work)
{
	struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
	int ret;

	atomic_set(&rxq->rx_processing, 1);
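	/* Ensure rx_processing is set before the RX flow below begins */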
	smp_mb();

	if (!rxq->que_started) {
		atomic_set(&rxq->rx_processing, 0);
		dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
		return;
	}

	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
		t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
	atomic_set(&rxq->rx_processing, 0);
}

void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
	struct dpmaif_rx_queue *rxq;
	int qno;

	qno = ffs(que_mask) - 1;
	if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
		dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %d\n", qno);
		return;
	}

	rxq = &dpmaif_ctrl->rxq[qno];
	queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
}

static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
				  const struct dpmaif_bat_request *bat_req)
{
	if (bat_req->bat_base)
		dma_free_coherent(dpmaif_ctrl->dev,
				  bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
				  bat_req->bat_base, bat_req->bat_bus_addr);
}

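/**
 * t7xx_dpmaif_bat_alloc() - Allocate a BAT ring and its bookkeeping structures.
 * @dpmaif_ctrl: DPMAIF HIF control structure.
 * @bat_req: BAT request structure to initialize.
 * @buf_type: BAT_TYPE_NORMAL for the packet (skb) ring, BAT_TYPE_FRAG for the fragment ring.
 *
 * Allocate the DMA-coherent BAT ring, the software buffer tracking array and
 * the release bitmap, then reset the ring indices.
 *
 * Return: 0 on success, or -ENOMEM on allocation failure.
 */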
int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
			  const enum bat_type buf_type)
{
	int sw_buf_size;

	if (buf_type == BAT_TYPE_FRAG) {
		sw_buf_size = sizeof(struct dpmaif_bat_page);
		bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
		bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
	} else {
		sw_buf_size = sizeof(struct dpmaif_bat_skb);
		bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
		bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
	}

	bat_req->type = buf_type;
	bat_req->bat_wr_idx = 0;
	bat_req->bat_release_rd_idx = 0;

	bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
					       bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
					       &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!bat_req->bat_base)
		return -ENOMEM;

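	/* Software bookkeeping for the buffer behind each BAT entry */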
	bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
					GFP_KERNEL);
	if (!bat_req->bat_skb)
		goto err_free_dma_mem;

	bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
	if (!bat_req->bat_bitmap)
		goto err_free_dma_mem;

	spin_lock_init(&bat_req->mask_lock);
	atomic_set(&bat_req->refcnt, 0);
	return 0;

err_free_dma_mem:
	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);

	return -ENOMEM;
}

void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
{
	if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
		return;

	bitmap_free(bat_req->bat_bitmap);
	bat_req->bat_bitmap = NULL;

	if (bat_req->bat_skb) {
		unsigned int i;

		for (i = 0; i < bat_req->bat_size_cnt; i++) {
			if (bat_req->type == BAT_TYPE_FRAG)
				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
			else
				t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
		}
	}

	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
}

static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
{
	rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
	rxq->pit_rd_idx = 0;
	rxq->pit_wr_idx = 0;
	rxq->pit_release_rd_idx = 0;
	rxq->expect_pit_seq = 0;
	rxq->pit_remain_release_cnt = 0;
	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));

	rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
					   rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
					   &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!rxq->pit_base)
		return -ENOMEM;

	rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
	atomic_inc(&rxq->bat_req->refcnt);

	rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
	atomic_inc(&rxq->bat_frag->refcnt);
	return 0;
}

static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
{
	if (!rxq->dpmaif_ctrl)
		return;

	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);

	if (rxq->pit_base)
		dma_free_coherent(rxq->dpmaif_ctrl->dev,
				  rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
				  rxq->pit_base, rxq->pit_bus_addr);
}

int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
{
	int ret;

	ret = t7xx_dpmaif_rx_alloc(queue);
	if (ret < 0) {
		dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
		return ret;
	}

	INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work);

	queue->worker = alloc_workqueue("dpmaif_rx%d_worker",
					WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index);
	if (!queue->worker) {
		ret = -ENOMEM;
		goto err_free_rx_buffer;
	}

	init_waitqueue_head(&queue->rx_wq);
	skb_queue_head_init(&queue->skb_list);
	queue->skb_list_max_len = queue->bat_req->pkt_buf_sz;
	queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread,
				       queue, "dpmaif_rx%d_push", queue->index);

	ret = PTR_ERR_OR_ZERO(queue->rx_thread);
	if (ret)
		goto err_free_workqueue;

	return 0;

err_free_workqueue:
	destroy_workqueue(queue->worker);

err_free_rx_buffer:
	t7xx_dpmaif_rx_buf_free(queue);

	return ret;
}

void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
{
	if (queue->worker)
		destroy_workqueue(queue->worker);

	if (queue->rx_thread)
		kthread_stop(queue->rx_thread);

	skb_queue_purge(&queue->skb_list);
	t7xx_dpmaif_rx_buf_free(queue);
}

static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
{
	struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
	struct dpmaif_rx_queue *rxq;
	int ret;

	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);

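	/* All RX queues share a single BAT table, so use the default queue */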
	rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
		t7xx_dpmaif_bat_release_and_add(rxq);
		t7xx_dpmaif_frag_bat_release_and_add(rxq);
	}

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}

int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
{
	dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
						      WQ_MEM_RECLAIM, 1);
	if (!dpmaif_ctrl->bat_release_wq)
		return -ENOMEM;

	INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
	return 0;
}

void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
	flush_work(&dpmaif_ctrl->bat_release_work);

	if (dpmaif_ctrl->bat_release_wq) {
		destroy_workqueue(dpmaif_ctrl->bat_release_wq);
		dpmaif_ctrl->bat_release_wq = NULL;
	}
}

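/**
 * t7xx_dpmaif_rx_stop() - Suspend the RX flow.
 * @dpmaif_ctrl: DPMAIF HIF control structure.
 *
 * Wait for any in-flight RX work to finish, then mark every RX queue as stopped.
 */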
void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
	unsigned int i;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
		int timeout, value;

		flush_work(&rxq->dpmaif_rxq_work);

		timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
						    !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
		if (timeout)
			dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");

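		/* Ensure RX processing has stopped before que_started is cleared */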
		smp_mb();
		rxq->que_started = false;
	}
}

static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
{
	int cnt, j = 0;

	flush_work(&rxq->dpmaif_rxq_work);
	rxq->que_started = false;

	do {
		cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
						rxq->pit_wr_idx, DPMAIF_READ);

		if (++j >= DPMAIF_MAX_CHECK_COUNT) {
			dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
			break;
		}
	} while (cnt);

	memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
	memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
	bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));

	rxq->pit_rd_idx = 0;
	rxq->pit_wr_idx = 0;
	rxq->pit_release_rd_idx = 0;
	rxq->expect_pit_seq = 0;
	rxq->pit_remain_release_cnt = 0;
	rxq->bat_req->bat_release_rd_idx = 0;
	rxq->bat_req->bat_wr_idx = 0;
	rxq->bat_frag->bat_release_rd_idx = 0;
	rxq->bat_frag->bat_wr_idx = 0;
}

void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++)
		t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
}