#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"

#define DPMAIF_SKB_TX_BURST_CNT	5
#define DPMAIF_DRB_LIST_LEN	6144

/* DRB descriptor types */
#define DES_DTYP_PD		0
#define DES_DTYP_MSG		1

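/*
 * Each TX queue owns a DRB ring tracked with three indices: drb_wr_idx is
 * where SW writes new DRBs, drb_rd_idx mirrors the HW read index, and
 * drb_release_rd_idx is where SW releases completed DRBs and their skbs.
 */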
static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
	unsigned long flags;

	if (!txq->que_started)
		return 0;

	old_sw_rd_idx = txq->drb_rd_idx;
	new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
	if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
		dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
		return 0;
	}

	if (old_sw_rd_idx <= new_hw_rd_idx)
		drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
	else
		drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;

	spin_lock_irqsave(&txq->tx_lock, flags);
	txq->drb_rd_idx = new_hw_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	return drb_cnt;
}

static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num, unsigned int release_cnt)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
	struct dpmaif_drb *cur_drb, *drb_base;
	unsigned int drb_cnt, i, cur_idx;
	unsigned long flags;

	drb_skb_base = txq->drb_skb_base;
	drb_base = txq->drb_base;

	spin_lock_irqsave(&txq->tx_lock, flags);
	drb_cnt = txq->drb_size_cnt;
	cur_idx = txq->drb_release_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (i = 0; i < release_cnt; i++) {
		cur_drb = drb_base + cur_idx;
		if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
			cur_drb_skb = drb_skb_base + cur_idx;
			if (!cur_drb_skb->is_msg)
				dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
						 cur_drb_skb->data_len, DMA_TO_DEVICE);

			if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
				if (!cur_drb_skb->skb) {
					dev_err(dpmaif_ctrl->dev,
						"txq%u: DRB check fail, invalid skb\n", q_num);
					continue;
				}

				dev_kfree_skb_any(cur_drb_skb->skb);
			}

			cur_drb_skb->skb = NULL;
		}

		spin_lock_irqsave(&txq->tx_lock, flags);
		cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
		txq->drb_release_rd_idx = cur_idx;
		spin_unlock_irqrestore(&txq->tx_lock, flags);

		if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
			cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
	}

	if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
		dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);

	return i;
}

static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
				  unsigned int q_num, unsigned int budget)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int rel_cnt, real_rel_cnt;

	/* Refresh the read index from HW before computing how many DRBs can be released */
	t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);

	rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
					    txq->drb_rd_idx, DPMAIF_READ);

	real_rel_cnt = min_not_zero(budget, rel_cnt);
	if (real_rel_cnt)
		real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);

	return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
}

static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
{
	return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
}

static void t7xx_dpmaif_tx_done(struct work_struct *work)
{
	struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
	struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
	struct dpmaif_hw_info *hw_info;
	int ret;

	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	/* The device may be in a low power state, keep it awake while releasing DRBs */
	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
		hw_info = &dpmaif_ctrl->hw_info;
		ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
		if (ret == -EAGAIN ||
		    (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
		     t7xx_dpmaif_drb_ring_not_empty(txq))) {
			queue_work(dpmaif_ctrl->txq[txq->index].worker,
				   &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
			/* Clear the IP busy status so the device can enter low power when idle */
			t7xx_dpmaif_clr_ip_busy_sts(hw_info);
		} else {
			t7xx_dpmaif_clr_ip_busy_sts(hw_info);
			t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
		}
	}

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}

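/*
 * A transmitted skb is encoded as one message DRB followed by one payload DRB
 * for the linear data and one per page fragment; only the last payload DRB
 * has the continuation bit cleared.
 */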
static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
			       unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
			       unsigned int channel_id)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = drb_base + cur_idx;

	drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
				  FIELD_PREP(DRB_HDR_CONT, 1) |
				  FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));

	drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
				       FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
				       FIELD_PREP(DRB_MSG_L4_CHK, 1));
}

static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				   unsigned int cur_idx, dma_addr_t data_addr,
				   unsigned int pkt_size, bool last_one)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = drb_base + cur_idx;
	u32 header;

	header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size);
	if (!last_one)
		header |= FIELD_PREP(DRB_HDR_CONT, 1);

	drb->header = cpu_to_le32(header);
	drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
	drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
}

static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
				bool is_frag, bool is_last_one, dma_addr_t bus_addr,
				unsigned int data_len)
{
	struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
	struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx;

	drb_skb->skb = skb;
	drb_skb->bus_addr = bus_addr;
	drb_skb->data_len = data_len;
	drb_skb->index = cur_idx;
	drb_skb->is_msg = is_msg;
	drb_skb->is_frag = is_frag;
	drb_skb->is_last = is_last_one;
}

static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
{
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	unsigned int wr_cnt, send_cnt, payload_cnt;
	unsigned int cur_idx, drb_wr_idx_backup;
	struct skb_shared_info *shinfo;
	struct dpmaif_tx_queue *txq;
	struct t7xx_skb_cb *skb_cb;
	unsigned long flags;

	skb_cb = T7XX_SKB_CB(skb);
	txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
	if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
		return -ENODEV;

	atomic_set(&txq->tx_processing, 1);
	/* Ensure tx_processing is visible before the DRBs are built */
	smp_mb();

	shinfo = skb_shinfo(skb);
	if (shinfo->frag_list)
		dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");

	/* One payload DRB per fragment plus one for the linear data */
	payload_cnt = shinfo->nr_frags + 1;
	/* Plus the message DRB that heads the packet */
	send_cnt = payload_cnt + 1;

	spin_lock_irqsave(&txq->tx_lock, flags);
	cur_idx = txq->drb_wr_idx;
	drb_wr_idx_backup = cur_idx;
	txq->drb_wr_idx += send_cnt;
	if (txq->drb_wr_idx >= txq->drb_size_cnt)
		txq->drb_wr_idx -= txq->drb_size_cnt;
	t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
	t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
		bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
		unsigned int data_len;
		dma_addr_t bus_addr;
		void *data_addr;

		if (!wr_cnt) {
			data_len = skb_headlen(skb);
			data_addr = skb->data;
			is_frag = false;
		} else {
			skb_frag_t *frag = shinfo->frags + wr_cnt - 1;

			data_len = skb_frag_size(frag);
			data_addr = skb_frag_address(frag);
			is_frag = true;
		}

		bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
			goto unmap_buffers;

		cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);

		spin_lock_irqsave(&txq->tx_lock, flags);
		t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
				       is_last_one);
		t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
				    is_last_one, bus_addr, data_len);
		spin_unlock_irqrestore(&txq->tx_lock, flags);
	}

	if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);

	atomic_set(&txq->tx_processing, 0);

	return 0;

unmap_buffers:
	while (wr_cnt--) {
		struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base;

		cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
		drb_skb += cur_idx;
		dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
				 drb_skb->data_len, DMA_TO_DEVICE);
	}

	txq->drb_wr_idx = drb_wr_idx_backup;
	atomic_set(&txq->tx_processing, 0);

	return -ENOMEM;
}

static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
			return false;
	}

	return true;
}

/* Currently, only the default TX queue is used */
static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_tx_queue *txq;

	txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
	if (!txq->que_started)
		return NULL;

	return txq;
}

static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
{
	return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
					 txq->drb_wr_idx, DPMAIF_WRITE);
}

static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
{
	/* Payload DRBs (linear data + frags) plus the message DRB */
	return skb_shinfo(skb)->nr_frags + 2;
}

static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
{
	unsigned int drb_remain_cnt, i;
	unsigned int send_drb_cnt;
	int drb_cnt = 0;
	int ret = 0;

	drb_remain_cnt = t7xx_txq_drb_wr_available(txq);

	for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
		struct sk_buff *skb;

		skb = skb_peek(&txq->tx_skb_head);
		if (!skb)
			break;

		send_drb_cnt = t7xx_skb_drb_cnt(skb);
		if (drb_remain_cnt < send_drb_cnt) {
			/* Re-read the free count, the HW may have released DRBs meanwhile */
			drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
			continue;
		}

		drb_remain_cnt -= send_drb_cnt;

		ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
		if (ret < 0) {
			dev_err(txq->dpmaif_ctrl->dev,
				"Failed to add skb to device's ring: %d\n", ret);
			break;
		}

		drb_cnt += send_drb_cnt;
		skb_unlink(skb, &txq->tx_skb_head);
	}

	if (drb_cnt > 0)
		return drb_cnt;

	return ret;
}

static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
	bool wait_disable_sleep = true;

	do {
		struct dpmaif_tx_queue *txq;
		int drb_send_cnt;

		txq = t7xx_select_tx_queue(dpmaif_ctrl);
		if (!txq)
			return;

		drb_send_cnt = t7xx_txq_burst_send_skb(txq);
		if (drb_send_cnt <= 0) {
			usleep_range(10, 20);
			cond_resched();
			continue;
		}

		/* Wait until the sleep-disable handshake completes before touching the HW */
		if (wait_disable_sleep) {
			if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
				return;

			wait_disable_sleep = false;
		}

		t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
						 drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);

		cond_resched();
	} while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
		 (dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
}

static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
{
	struct dpmaif_ctrl *dpmaif_ctrl = arg;
	int ret;

	while (!kthread_should_stop()) {
		if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
		    dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
			if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
						     (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
						      dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
						     kthread_should_stop()))
				continue;

			if (kthread_should_stop())
				break;
		}

		ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
		if (ret < 0 && ret != -EACCES)
			return ret;

		t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
		t7xx_do_tx_hw_push(dpmaif_ctrl);
		t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
		pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
		pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
	}

	return 0;
}

int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
	init_waitqueue_head(&dpmaif_ctrl->tx_wq);
	dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
					     dpmaif_ctrl, "dpmaif_tx_hw_push");
	return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
}

void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
	if (dpmaif_ctrl->tx_thread)
		kthread_stop(dpmaif_ctrl->tx_thread);
}

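/**
 * t7xx_dpmaif_tx_send_skb() - Queue an skb for transmission.
 * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
 * @txq_number: Queue number to transmit on.
 * @skb: Pointer to the skb to transmit.
 *
 * Add the skb to the TX queue list and wake up the push thread that moves
 * queued skbs into the DRB ring.
 *
 * Return:
 * * 0		- Success.
 * * -EBUSY	- Not enough TX budget left on this queue.
 */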
int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
			    struct sk_buff *skb)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	struct t7xx_skb_cb *skb_cb;

	if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
		return -EBUSY;
	}

	skb_cb = T7XX_SKB_CB(skb);
	skb_cb->txq_number = txq_number;
	skb_queue_tail(&txq->tx_skb_head, skb);
	wake_up(&dpmaif_ctrl->tx_wq);

	return 0;
}

void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		if (que_mask & BIT(i))
			queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
	}
}

static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
{
	size_t brb_skb_size, brb_pd_size;

	brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
	brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);

	txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;

	/* DRB descriptor ring shared with the HW */
	txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
					   &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!txq->drb_base)
		return -ENOMEM;

	/* SW bookkeeping of the skb attached to each DRB */
	txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL);
	if (!txq->drb_skb_base) {
		dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
				  txq->drb_base, txq->drb_bus_addr);
		return -ENOMEM;
	}

	return 0;
}

static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
{
	struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
	unsigned int i;

	if (!drb_skb_base)
		return;

	for (i = 0; i < txq->drb_size_cnt; i++) {
		drb_skb = drb_skb_base + i;
		if (!drb_skb->skb)
			continue;

		if (!drb_skb->is_msg)
			dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
					 drb_skb->data_len, DMA_TO_DEVICE);

		if (drb_skb->is_last) {
			dev_kfree_skb(drb_skb->skb);
			drb_skb->skb = NULL;
		}
	}
}

static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
{
	if (txq->drb_base)
		dma_free_coherent(txq->dpmaif_ctrl->dev,
				  txq->drb_size_cnt * sizeof(struct dpmaif_drb),
				  txq->drb_base, txq->drb_bus_addr);

	t7xx_dpmaif_tx_free_drb_skb(txq);
}

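/**
 * t7xx_dpmaif_txq_init() - Initialize a TX queue.
 * @txq: Pointer to struct dpmaif_tx_queue.
 *
 * Initialize the queue bookkeeping, allocate the DRB buffers and create the
 * per-queue workqueue used for TX completion handling.
 *
 * Return:
 * * 0		- Success.
 * * -ENOMEM	- Unable to allocate the required resources.
 */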
int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
{
	int ret;

	skb_queue_head_init(&txq->tx_skb_head);
	init_waitqueue_head(&txq->req_wq);
	atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);

	ret = t7xx_dpmaif_tx_drb_buf_init(txq);
	if (ret) {
		dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
		return ret;
	}

	txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM |
				      (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index);
	if (!txq->worker)
		return -ENOMEM;

	INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
	spin_lock_init(&txq->tx_lock);

	return 0;
}

void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
{
	if (txq->worker)
		destroy_workqueue(txq->worker);

	skb_queue_purge(&txq->tx_skb_head);
	t7xx_dpmaif_tx_drb_buf_rel(txq);
}

void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		struct dpmaif_tx_queue *txq;
		int count = 0;

		txq = &dpmaif_ctrl->txq[i];
		txq->que_started = false;
		/* Make sure que_started is seen as false before checking tx_processing */
		smp_mb();

		/* Wait for in-flight TX on this queue to finish */
		while (atomic_read(&txq->tx_processing)) {
			if (++count >= DPMAIF_MAX_CHECK_COUNT) {
				dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
				break;
			}
		}
	}
}

static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
{
	txq->que_started = false;

	cancel_work_sync(&txq->dpmaif_tx_work);
	flush_work(&txq->dpmaif_tx_work);
	t7xx_dpmaif_tx_free_drb_skb(txq);

	txq->drb_rd_idx = 0;
	txq->drb_wr_idx = 0;
	txq->drb_release_rd_idx = 0;
}

void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++)
		t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
}