0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019 #include <linux/bits.h>
0020 #include <linux/bitops.h>
0021 #include <linux/delay.h>
0022 #include <linux/device.h>
0023 #include <linux/dmapool.h>
0024 #include <linux/dma-mapping.h>
0025 #include <linux/dma-direction.h>
0026 #include <linux/gfp.h>
0027 #include <linux/io.h>
0028 #include <linux/io-64-nonatomic-lo-hi.h>
0029 #include <linux/iopoll.h>
0030 #include <linux/irqreturn.h>
0031 #include <linux/kernel.h>
0032 #include <linux/kthread.h>
0033 #include <linux/list.h>
0034 #include <linux/netdevice.h>
0035 #include <linux/pci.h>
0036 #include <linux/pm_runtime.h>
0037 #include <linux/sched.h>
0038 #include <linux/skbuff.h>
0039 #include <linux/slab.h>
0040 #include <linux/spinlock.h>
0041 #include <linux/types.h>
0042 #include <linux/wait.h>
0043 #include <linux/workqueue.h>
0044
0045 #include "t7xx_cldma.h"
0046 #include "t7xx_hif_cldma.h"
0047 #include "t7xx_mhccif.h"
0048 #include "t7xx_pci.h"
0049 #include "t7xx_pcie_mac.h"
0050 #include "t7xx_port_proxy.h"
0051 #include "t7xx_reg.h"
0052 #include "t7xx_state_monitor.h"
0053
0054 #define MAX_TX_BUDGET 16
0055 #define MAX_RX_BUDGET 16
0056
0057 #define CHECK_Q_STOP_TIMEOUT_US 1000000
0058 #define CHECK_Q_STOP_STEP_US 10000
0059
0060 #define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
0061
0062 static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
0063 enum mtk_txrx tx_rx, unsigned int index)
0064 {
0065 queue->dir = tx_rx;
0066 queue->index = index;
0067 queue->md_ctrl = md_ctrl;
0068 queue->tr_ring = NULL;
0069 queue->tr_done = NULL;
0070 queue->tx_next = NULL;
0071 }
0072
0073 static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
0074 enum mtk_txrx tx_rx, unsigned int index)
0075 {
0076 md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
0077 init_waitqueue_head(&queue->req_wq);
0078 spin_lock_init(&queue->ring_lock);
0079 }
0080
0081 static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
0082 {
0083 gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
0084 gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
0085 }
0086
0087 static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
0088 {
0089 gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
0090 gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
0091 }
0092
0093 static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
0094 size_t size, gfp_t gfp_mask)
0095 {
0096 req->skb = __dev_alloc_skb(size, gfp_mask);
0097 if (!req->skb)
0098 return -ENOMEM;
0099
0100 req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
0101 if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
0102 dev_kfree_skb_any(req->skb);
0103 req->skb = NULL;
0104 req->mapped_buff = 0;
0105 dev_err(md_ctrl->dev, "DMA mapping failed\n");
0106 return -ENOMEM;
0107 }
0108
0109 return 0;
0110 }
0111
/* Reap completed RX GPDs from @queue: hand each packet to the installed
 * recv_skb() callback, then attach a fresh buffer to the refill GPD and
 * return it to the hardware.
 *
 * Returns 0 on success or when the ring caught up with the hardware,
 * -ENODATA if the ring is not set up, -ENODEV if the PCIe link is gone,
 * or a negative error from recv_skb()/buffer allocation.
 * Sets *@over_budget when the loop stopped due to the budget limit.
 */
static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int hwo_polling_count = 0;
	struct t7xx_cldma_hw *hw_info;
	bool rx_not_done = true;
	unsigned long flags;
	int count = 0;

	hw_info = &md_ctrl->hw_info;

	do {
		struct cldma_request *req;
		struct cldma_gpd *gpd;
		struct sk_buff *skb;
		int ret;

		req = queue->tr_done;
		if (!req)
			return -ENODATA;

		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			dma_addr_t gpd_addr;

			/* Descriptor still owned by HW: bail out if the device
			 * vanished, otherwise poll briefly in case the HWO
			 * clear is racing with the DMA pointer update.
			 */
			if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
				dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
				return -ENODEV;
			}

			gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
					    queue->index * sizeof(u64));
			/* Done if HW caught up with us, or after ~100us of polling */
			if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
				return 0;

			udelay(1);
			continue;
		}

		hwo_polling_count = 0;
		skb = req->skb;

		if (req->mapped_buff) {
			dma_unmap_single(md_ctrl->dev, req->mapped_buff,
					 queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
			req->mapped_buff = 0;
		}

		/* Trim the skb to exactly the length the hardware wrote */
		skb->len = 0;
		skb_reset_tail_pointer(skb);
		skb_put(skb, le16_to_cpu(gpd->data_buff_len));

		ret = md_ctrl->recv_skb(queue, skb);
		/* NOTE(review): on error the skb stays attached to req and the
		 * ring pointer does not advance — confirm recv_skb() does not
		 * free the skb when it fails.
		 */
		if (ret < 0)
			return ret;

		req->skb = NULL;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);
		req = queue->rx_refill;

		/* Re-arm the refill GPD with a fresh buffer and give it back to HW */
		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
		if (ret)
			return ret;

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->data_buff_len = 0;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		/* Stop only once the budget is spent AND a reschedule is due */
		rx_not_done = ++count < budget || !need_resched();
	} while (rx_not_done);

	*over_budget = true;
	return 0;
}
0196
/* Drain RX work for @queue, re-arming the hardware until no RX interrupt
 * remains pending. Returns 0 when idle, -EAGAIN when more data is pending
 * but the budget was exhausted (caller should requeue the work), or a
 * negative error code.
 */
static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int pending_rx_int;
	bool over_budget = false;
	unsigned long flags;
	int ret;

	hw_info = &md_ctrl->hw_info;

	do {
		ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
		if (ret == -ENODATA)
			return 0;
		else if (ret)
			return ret;

		pending_rx_int = 0;

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		if (md_ctrl->rxq_active & BIT(queue->index)) {
			/* Resume the queue if HW paused it while we were reaping */
			if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
				t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);

			pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
								  MTK_RX);
			if (pending_rx_int) {
				t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);

				/* More data but no budget left: yield the worker
				 * and let the caller reschedule us.
				 */
				if (over_budget) {
					spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
					return -EAGAIN;
				}
			}
		}
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	} while (pending_rx_int);

	return 0;
}
0238
/* RX work item: collect received packets, requeue itself while work remains
 * and the queue is still active, then re-enable the RX interrupts and drop
 * the PM reference taken by t7xx_cldma_irq_work_cb().
 */
static void t7xx_cldma_rx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	int value;

	value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
	if (value && md_ctrl->rxq_active & BIT(queue->index)) {
		/* Still work pending (-EAGAIN) or an error while active: retry.
		 * The PM reference is kept across the requeue.
		 */
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}
0257
/* Reclaim completed TX GPDs: unmap and free each transmitted skb, return
 * the slots to the queue budget and wake up that many senders blocked in
 * t7xx_cldma_send_skb(). Returns the number of reclaimed slots.
 */
static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int dma_len, count = 0;
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	dma_addr_t dma_free;
	struct sk_buff *skb;

	/* NOTE(review): this runs from a workqueue, so kthread_should_stop()
	 * on the kworker should always be false here — confirm intent.
	 */
	while (!kthread_should_stop()) {
		spin_lock_irqsave(&queue->ring_lock, flags);
		req = queue->tr_done;
		if (!req) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		gpd = req->gpd;
		/* Stop at the first GPD still owned by HW or without a payload */
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		queue->budget++;
		dma_free = req->mapped_buff;
		dma_len = le16_to_cpu(gpd->data_buff_len);
		skb = req->skb;
		req->skb = NULL;
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		/* Unmap and free outside the ring lock */
		count++;
		dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}

	if (count)
		wake_up_nr(&queue->req_wq, count);

	return count;
}
0298
/* Handle a TX "queue empty" event: if software committed a GPD after the
 * hardware already stopped, resume the queue so that GPD gets processed.
 */
static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_request *req;
	dma_addr_t ul_curr_addr;
	unsigned long flags;
	bool pending_gpd;

	if (!(md_ctrl->txq_active & BIT(queue->index)))
		return;

	/* The most recently committed descriptor is the one before tx_next */
	spin_lock_irqsave(&queue->ring_lock, flags);
	req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
	spin_unlock_irqrestore(&queue->ring_lock, flags);

	pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (pending_gpd) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

		/* Only resume if HW is really parked on the pending GPD;
		 * otherwise the queue is not actually empty yet.
		 */
		ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
					queue->index * sizeof(u64));
		if (req->gpd_addr != ul_curr_addr) {
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
			dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
				md_ctrl->hif_id, queue->index);
			return;
		}

		t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
0334
/* TX work item: reclaim finished descriptors, handle queue-empty events,
 * requeue itself while TX-done interrupts keep arriving, then re-enable
 * the TX interrupts and drop the PM reference taken by
 * t7xx_cldma_irq_work_cb().
 */
static void t7xx_cldma_tx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int l2_tx_int;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;
	t7xx_cldma_gpd_tx_collect(queue);
	l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
					     MTK_TX);
	if (l2_tx_int & EQ_STA_BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
		t7xx_cldma_txq_empty_hndl(queue);
	}

	if (l2_tx_int & BIT(queue->index)) {
		/* More TX completions arrived meanwhile: ack and run again,
		 * keeping the PM reference across the requeue.
		 */
		t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index)) {
		t7xx_cldma_clear_ip_busy(hw_info);
		t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
		t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}
0369
0370 static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
0371 struct cldma_ring *ring, enum dma_data_direction tx_rx)
0372 {
0373 struct cldma_request *req_cur, *req_next;
0374
0375 list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
0376 if (req_cur->mapped_buff && req_cur->skb) {
0377 dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
0378 ring->pkt_size, tx_rx);
0379 req_cur->mapped_buff = 0;
0380 }
0381
0382 dev_kfree_skb_any(req_cur->skb);
0383
0384 if (req_cur->gpd)
0385 dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);
0386
0387 list_del(&req_cur->entry);
0388 kfree(req_cur);
0389 }
0390 }
0391
0392 static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
0393 {
0394 struct cldma_request *req;
0395 int val;
0396
0397 req = kzalloc(sizeof(*req), GFP_KERNEL);
0398 if (!req)
0399 return NULL;
0400
0401 req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
0402 if (!req->gpd)
0403 goto err_free_req;
0404
0405 val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
0406 if (val)
0407 goto err_free_pool;
0408
0409 return req;
0410
0411 err_free_pool:
0412 dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
0413
0414 err_free_req:
0415 kfree(req);
0416
0417 return NULL;
0418 }
0419
/* Build an RX ring of MAX_RX_BUDGET requests, each with a mapped skb and a
 * hardware-owned GPD, then chain the GPDs into a circular list.
 * Returns 0 on success or -ENOMEM (partially built ring is freed).
 */
static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_RX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link each GPD to the next one's DMA address. 'gpd' enters this loop
	 * still pointing at the LAST allocated GPD, so the first iteration
	 * closes the ring (last -> first) and the rest link i -> i+1.
	 */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}
0452
0453 static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
0454 {
0455 struct cldma_request *req;
0456
0457 req = kzalloc(sizeof(*req), GFP_KERNEL);
0458 if (!req)
0459 return NULL;
0460
0461 req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
0462 if (!req->gpd) {
0463 kfree(req);
0464 return NULL;
0465 }
0466
0467 return req;
0468 }
0469
/* Build a TX ring of MAX_TX_BUDGET requests with software-owned GPDs and
 * chain the GPDs into a circular list.
 * Returns 0 on success or -ENOMEM (partially built ring is freed).
 */
static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_TX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_tx_request(md_ctrl);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link each GPD to the next one's DMA address. 'gpd' enters this loop
	 * still pointing at the LAST allocated GPD, so the first iteration
	 * closes the ring (last -> first) and the rest link i -> i+1.
	 */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}
0500
0501
0502
0503
0504
0505
0506
0507 static void t7xx_cldma_q_reset(struct cldma_queue *queue)
0508 {
0509 struct cldma_request *req;
0510
0511 req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
0512 queue->tr_done = req;
0513 queue->budget = queue->tr_ring->length;
0514
0515 if (queue->dir == MTK_TX)
0516 queue->tx_next = req;
0517 else
0518 queue->rx_refill = req;
0519 }
0520
0521 static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
0522 {
0523 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
0524
0525 queue->dir = MTK_RX;
0526 queue->tr_ring = &md_ctrl->rx_ring[queue->index];
0527 t7xx_cldma_q_reset(queue);
0528 }
0529
0530 static void t7xx_cldma_txq_init(struct cldma_queue *queue)
0531 {
0532 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
0533
0534 queue->dir = MTK_TX;
0535 queue->tr_ring = &md_ctrl->tx_ring[queue->index];
0536 t7xx_cldma_q_reset(queue);
0537 }
0538
/* Unmask this CLDMA's interrupt at the PCIe MAC level. */
static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}
0543
/* Mask this CLDMA's interrupt at the PCIe MAC level. */
static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}
0548
/* Top-half interrupt dispatcher: read and ack the L2 TX/RX status, then for
 * each signaled queue disable its interrupts, take a PM reference and hand
 * the rest of the work to the queue's worker (which re-enables interrupts
 * and drops the reference when done).
 */
static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
{
	unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int i;

	/* Only act on interrupts that are not masked */
	l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
	l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
	l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
	l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
	l2_tx_int &= ~l2_tx_int_msk;
	l2_rx_int &= ~l2_rx_int_msk;

	if (l2_tx_int) {
		if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Ack the L3 TX error status by writing it back */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
		}

		t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
		if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
				if (i < CLDMA_TXQ_NUM) {
					/* TX-done bit: defer to the worker */
					pm_runtime_get(md_ctrl->dev);
					t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
					t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
					queue_work(md_ctrl->txq[i].worker,
						   &md_ctrl->txq[i].cldma_work);
				} else {
					/* Empty-status bit: map back to queue index */
					t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
				}
			}
		}
	}

	if (l2_rx_int) {
		if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Ack the L3 RX error status by writing it back */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
		}

		t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
		if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			/* Fold empty-status bits onto the per-queue bits */
			l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
			for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
				pm_runtime_get(md_ctrl->dev);
				t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
				t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
				queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
			}
		}
	}
}
0609
0610 static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
0611 {
0612 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
0613 unsigned int tx_active;
0614 unsigned int rx_active;
0615
0616 if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
0617 return false;
0618
0619 tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
0620 rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
0621
0622 return tx_active || rx_active;
0623 }
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
/**
 * t7xx_cldma_stop() - Stop all CLDMA hardware queues and interrupts.
 * @md_ctrl: CLDMA context structure.
 *
 * Marks every queue inactive, stops the hardware, acks all pending
 * interrupts, flushes the per-queue workers and polls until the hardware
 * reports all queues idle.
 *
 * Return: 0 on success, or the poll-timeout error if the queues never stop.
 */
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	bool active;
	int i, ret;

	/* Clear the active masks before stopping HW so workers see the
	 * queues as inactive and do not re-arm them.
	 */
	md_ctrl->rxq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	md_ctrl->txq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
	t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);

	if (md_ctrl->is_late_init) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			flush_work(&md_ctrl->txq[i].cldma_work);

		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			flush_work(&md_ctrl->rxq[i].cldma_work);
	}

	ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
				CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
	if (ret)
		dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);

	return ret;
}
0668
0669 static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
0670 {
0671 int i;
0672
0673 if (!md_ctrl->is_late_init)
0674 return;
0675
0676 for (i = 0; i < CLDMA_TXQ_NUM; i++)
0677 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
0678
0679 for (i = 0; i < CLDMA_RXQ_NUM; i++)
0680 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);
0681
0682 dma_pool_destroy(md_ctrl->gpd_dmapool);
0683 md_ctrl->gpd_dmapool = NULL;
0684 md_ctrl->is_late_init = false;
0685 }
0686
/**
 * t7xx_cldma_reset() - Reset the CLDMA controller to its pre-late-init state.
 * @md_ctrl: CLDMA context structure.
 *
 * Deactivates all queues and interrupts, cancels the per-queue work items,
 * resets every queue descriptor and releases the rings and GPD pool.
 */
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		/* Cancel outside the lock; the work item takes cldma_lock itself */
		cancel_work_sync(&md_ctrl->txq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->rxq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	t7xx_cldma_late_release(md_ctrl);
}
0716
0717
0718
0719
0720
0721
0722
0723
/**
 * t7xx_cldma_start() - Start the CLDMA hardware.
 * @md_ctrl: CLDMA context structure.
 *
 * Programs the per-queue start addresses from the ring state, enables the
 * interrupt, starts all RX queues and marks every queue active. Does
 * nothing unless t7xx_cldma_late_init() has run.
 */
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->is_late_init) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
		int i;

		t7xx_cldma_enable_irq(md_ctrl);

		for (i = 0; i < CLDMA_TXQ_NUM; i++) {
			if (md_ctrl->txq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->txq[i].tr_done->gpd_addr,
							     MTK_TX);
		}

		for (i = 0; i < CLDMA_RXQ_NUM; i++) {
			if (md_ctrl->rxq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->rxq[i].tr_done->gpd_addr,
							     MTK_RX);
		}

		/* TX queues are started lazily by t7xx_cldma_hw_start_send() */
		t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
		t7xx_cldma_hw_start(hw_info);
		md_ctrl->txq_started = 0;
		md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
		md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
0758
/* Discard all queued TX packets on queue @qnum and return its GPDs to the
 * software-owned, empty state.
 */
static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *txq = &md_ctrl->txq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;

	spin_lock_irqsave(&txq->ring_lock, flags);
	t7xx_cldma_q_reset(txq);
	list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags &= ~GPD_FLAGS_HWO;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);
		gpd->data_buff_len = 0;
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
	}
	spin_unlock_irqrestore(&txq->ring_lock, flags);
}
0778
/* Rewind RX queue @qnum: hand every GPD back to the hardware with an empty
 * buffer, re-allocating skbs for any request that lost one.
 * Returns 0 on success or a negative error if a buffer could not be
 * re-allocated (remaining requests are left without buffers).
 */
static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rxq->ring_lock, flags);
	t7xx_cldma_q_reset(rxq);
	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		gpd->data_buff_len = 0;

		if (req->skb) {
			req->skb->len = 0;
			skb_reset_tail_pointer(req->skb);
		}
	}

	/* Second pass: replace buffers that were consumed and never refilled.
	 * GFP_ATOMIC because the ring lock (irqsave) is held.
	 */
	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		if (req->skb)
			continue;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
		if (ret)
			break;

		t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
	}
	spin_unlock_irqrestore(&rxq->ring_lock, flags);

	return ret;
}
0814
0815 void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
0816 {
0817 int i;
0818
0819 if (tx_rx == MTK_TX) {
0820 for (i = 0; i < CLDMA_TXQ_NUM; i++)
0821 t7xx_cldma_clear_txq(md_ctrl, i);
0822 } else {
0823 for (i = 0; i < CLDMA_RXQ_NUM; i++)
0824 t7xx_cldma_clear_rxq(md_ctrl, i);
0825 }
0826 }
0827
/**
 * t7xx_cldma_stop_all_qs() - Stop all hardware queues in one direction.
 * @md_ctrl: CLDMA context structure.
 * @tx_rx: Direction to stop (MTK_TX or MTK_RX).
 *
 * Disables the direction's interrupts, clears its active mask so workers
 * stop re-arming it, then stops the hardware queues.
 */
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
	if (tx_rx == MTK_RX)
		md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	else
		md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
0843
/* DMA-map @skb and fill @tx_req's GPD with its address and length.
 * Hardware ownership (HWO) is only granted while the queue is active, so a
 * stopped queue never hands buffers to the device.
 * Returns 0 on success or -ENOMEM if the DMA mapping failed (in which case
 * tx_req->skb is left unset).
 */
static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
					    struct sk_buff *skb)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_gpd *gpd = tx_req->gpd;
	unsigned long flags;

	/* Update GPD */
	tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);

	if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
	gpd->data_buff_len = cpu_to_le16(skb->len);

	/* Set HWO under cldma_lock so it cannot race with a concurrent
	 * queue stop/start changing txq_active.
	 */
	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index))
		gpd->flags |= GPD_FLAGS_HWO;

	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	tx_req->skb = skb;
	return 0;
}
0874
0875
/* Kick the hardware for TX queue @qno after a GPD was committed.
 * Must be called with md_ctrl->cldma_lock held (it manipulates
 * txq_started and shared HW state).
 */
static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
				     struct cldma_request *prev_req)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

	/* If the HW lost its start address (e.g. after a reset), re-init and
	 * point it at the just-committed GPD.
	 */
	if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
		t7xx_cldma_hw_init(hw_info);
		t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
		md_ctrl->txq_started &= ~BIT(qno);
	}

	if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
		/* Resume if the queue ran before, otherwise start it fresh */
		if (md_ctrl->txq_started & BIT(qno))
			t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
		else
			t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);

		md_ctrl->txq_started |= BIT(qno);
	}
}
0897
0898
0899
0900
0901
0902
/**
 * t7xx_cldma_set_recv_skb() - Install the RX packet callback.
 * @md_ctrl: CLDMA context structure.
 * @recv_skb: Callback invoked for every skb received on a queue.
 */
void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
	md_ctrl->recv_skb = recv_skb;
}
0908
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
0921
0922 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
0923 {
0924 struct cldma_request *tx_req;
0925 struct cldma_queue *queue;
0926 unsigned long flags;
0927 int ret;
0928
0929 if (qno >= CLDMA_TXQ_NUM)
0930 return -EINVAL;
0931
0932 ret = pm_runtime_resume_and_get(md_ctrl->dev);
0933 if (ret < 0 && ret != -EACCES)
0934 return ret;
0935
0936 t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
0937 queue = &md_ctrl->txq[qno];
0938
0939 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
0940 if (!(md_ctrl->txq_active & BIT(qno))) {
0941 ret = -EIO;
0942 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
0943 goto allow_sleep;
0944 }
0945 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
0946
0947 do {
0948 spin_lock_irqsave(&queue->ring_lock, flags);
0949 tx_req = queue->tx_next;
0950 if (queue->budget > 0 && !tx_req->skb) {
0951 struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
0952
0953 queue->budget--;
0954 t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
0955 queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
0956 spin_unlock_irqrestore(&queue->ring_lock, flags);
0957
0958 if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
0959 ret = -ETIMEDOUT;
0960 break;
0961 }
0962
0963
0964
0965
0966
0967 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
0968 t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
0969 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
0970
0971 break;
0972 }
0973 spin_unlock_irqrestore(&queue->ring_lock, flags);
0974
0975 if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
0976 ret = -ETIMEDOUT;
0977 break;
0978 }
0979
0980 if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
0981 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
0982 t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
0983 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
0984 }
0985
0986 ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
0987 } while (!ret);
0988
0989 allow_sleep:
0990 t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
0991 pm_runtime_mark_last_busy(md_ctrl->dev);
0992 pm_runtime_put_autosuspend(md_ctrl->dev);
0993 return ret;
0994 }
0995
/* Allocate the per-controller GPD DMA pool and build all TX/RX rings, then
 * bind them to the queues. The last RX queue gets a jumbo buffer size.
 * Returns 0 on success, -EALREADY if already initialized, or -ENOMEM
 * (everything allocated so far is freed on failure).
 */
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
	char dma_pool_name[32];
	int i, j, ret;

	if (md_ctrl->is_late_init) {
		dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
		return -EALREADY;
	}

	snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);

	md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
					       sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
	if (!md_ctrl->gpd_dmapool) {
		dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
		return -ENOMEM;
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
		if (ret) {
			dev_err(md_ctrl->dev, "control TX ring init fail\n");
			goto err_free_tx_ring;
		}
	}

	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
		md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;

		/* The last RX queue carries large transfers */
		if (j == CLDMA_RXQ_NUM - 1)
			md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;

		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
		if (ret) {
			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
			goto err_free_rx_ring;
		}
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_txq_init(&md_ctrl->txq[i]);

	for (j = 0; j < CLDMA_RXQ_NUM; j++)
		t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);

	md_ctrl->is_late_init = true;
	return 0;

err_free_rx_ring:
	/* i and j hold the number of successfully built rings */
	while (j--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);

err_free_tx_ring:
	while (i--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	return ret;
}
1055
/* Translate device physical address @phy_addr into its spot inside the
 * mapped PCIe region: @addr is the virtual base mapped at physical
 * @addr_trs1.
 */
static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
{
	return addr + phy_addr - addr_trs1;
}
1060
/* Fill in the hardware register bases and interrupt ID for the modem CLDMA.
 * Only CLDMA_ID_MD is populated here; other hif_ids are left untouched.
 */
static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	u32 phy_ao_base, phy_pd_base;

	if (md_ctrl->hif_id != CLDMA_ID_MD)
		return;

	phy_ao_base = CLDMA1_AO_BASE;
	phy_pd_base = CLDMA1_PD_BASE;
	hw_info->phy_interrupt_id = CLDMA1_INT;
	hw_info->hw_mode = MODE_BIT_64;
	/* Map the always-on and power-down physical bases into the BAR */
	hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						      pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
	hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						       pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
}
1079
/* Default RX callback used until t7xx_cldma_set_recv_skb() installs a real
 * one: simply discard the packet.
 */
static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return 0;
}
1085
1086 int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
1087 {
1088 struct device *dev = &t7xx_dev->pdev->dev;
1089 struct cldma_ctrl *md_ctrl;
1090
1091 md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
1092 if (!md_ctrl)
1093 return -ENOMEM;
1094
1095 md_ctrl->t7xx_dev = t7xx_dev;
1096 md_ctrl->dev = dev;
1097 md_ctrl->hif_id = hif_id;
1098 md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
1099 t7xx_hw_info_init(md_ctrl);
1100 t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
1101 return 0;
1102 }
1103
/* Early-resume PM callback: restore the CLDMA register state, re-program
 * the per-queue start addresses from the software ring pointers, then
 * restart RX and its interrupts.
 */
static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;
	int qno_t;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_restore(hw_info);
	/* NOTE(review): assumes CLDMA_RXQ_NUM >= CLDMA_TXQ_NUM since the same
	 * loop indexes both txq[] and rxq[] — confirm.
	 */
	for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
					     MTK_TX);
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
					     MTK_RX);
	}
	t7xx_cldma_enable_irq(md_ctrl);
	t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
1128
1129 static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1130 {
1131 struct cldma_ctrl *md_ctrl = entity_param;
1132 unsigned long flags;
1133
1134 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1135 md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
1136 t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
1137 t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
1138 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1139
1140 if (md_ctrl->hif_id == CLDMA_ID_MD)
1141 t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);
1142
1143 return 0;
1144 }
1145
/* Late-suspend PM hook: quiesce the RX side. Interrupts are disabled
 * before the queues are stopped so no RX completion can race the stop;
 * finally the controller's IP-busy state is cleared and the CLDMA
 * interrupt is disabled entirely. The TX side was already stopped in
 * t7xx_cldma_suspend().
 */
static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	t7xx_cldma_clear_ip_busy(hw_info);
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
1163
/* Suspend PM hook: quiesce the TX side. For the modem controller the
 * MHCCIF D2H software interrupt is masked first, then TX interrupts are
 * disabled and the TX queues stopped under cldma_lock. RX is stopped
 * later, in t7xx_cldma_suspend_late(). Always returns 0.
 */
static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
	md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	/* Force queue restart on resume: t7xx_cldma_hw_set_start_addr() will
	 * be used again in t7xx_cldma_resume_early().
	 */
	md_ctrl->txq_started = 0;
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	return 0;
}
1185
1186 static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
1187 {
1188 md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
1189 if (!md_ctrl->pm_entity)
1190 return -ENOMEM;
1191
1192 md_ctrl->pm_entity->entity_param = md_ctrl;
1193
1194 if (md_ctrl->hif_id == CLDMA_ID_MD)
1195 md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
1196 else
1197 md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
1198
1199 md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
1200 md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
1201 md_ctrl->pm_entity->resume = t7xx_cldma_resume;
1202 md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
1203
1204 return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
1205 }
1206
1207 static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
1208 {
1209 if (!md_ctrl->pm_entity)
1210 return -EINVAL;
1211
1212 t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
1213 kfree(md_ctrl->pm_entity);
1214 md_ctrl->pm_entity = NULL;
1215 return 0;
1216 }
1217
/* Bring the CLDMA hardware to a known clean state: stop both directions,
 * acknowledge any pending TX/RX completion and queue-empty status, then
 * apply the base hardware configuration. The whole sequence runs under
 * cldma_lock so it cannot interleave with the IRQ path.
 */
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_init(hw_info);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
1231
/* CLDMA interrupt handler.
 * The ordering is deliberate: the interrupt is first cleared (masked) at
 * the PCIe MAC so it cannot re-fire while being serviced, queue events are
 * handled, then the latched status is cleared and the interrupt re-enabled.
 */
static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
{
	struct cldma_ctrl *md_ctrl = data;
	u32 interrupt;

	interrupt = md_ctrl->hw_info.phy_interrupt_id;
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
	t7xx_cldma_irq_work_cb(md_ctrl);
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
	return IRQ_HANDLED;
}
1244
1245 static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
1246 {
1247 int i;
1248
1249 for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1250 if (md_ctrl->txq[i].worker) {
1251 destroy_workqueue(md_ctrl->txq[i].worker);
1252 md_ctrl->txq[i].worker = NULL;
1253 }
1254 }
1255
1256 for (i = 0; i < CLDMA_RXQ_NUM; i++) {
1257 if (md_ctrl->rxq[i].worker) {
1258 destroy_workqueue(md_ctrl->rxq[i].worker);
1259 md_ctrl->rxq[i].worker = NULL;
1260 }
1261 }
1262 }
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276 int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
1277 {
1278 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1279 int ret, i;
1280
1281 md_ctrl->txq_active = 0;
1282 md_ctrl->rxq_active = 0;
1283 md_ctrl->is_late_init = false;
1284
1285 ret = t7xx_cldma_pm_init(md_ctrl);
1286 if (ret)
1287 return ret;
1288
1289 spin_lock_init(&md_ctrl->cldma_lock);
1290
1291 for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1292 md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
1293 md_ctrl->txq[i].worker =
1294 alloc_workqueue("md_hif%d_tx%d_worker",
1295 WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
1296 1, md_ctrl->hif_id, i);
1297 if (!md_ctrl->txq[i].worker)
1298 goto err_workqueue;
1299
1300 INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
1301 }
1302
1303 for (i = 0; i < CLDMA_RXQ_NUM; i++) {
1304 md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
1305 INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
1306
1307 md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker",
1308 WQ_UNBOUND | WQ_MEM_RECLAIM,
1309 1, md_ctrl->hif_id, i);
1310 if (!md_ctrl->rxq[i].worker)
1311 goto err_workqueue;
1312 }
1313
1314 t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
1315 md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
1316 md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
1317 md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
1318 t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
1319 return 0;
1320
1321 err_workqueue:
1322 t7xx_cldma_destroy_wqs(md_ctrl);
1323 t7xx_cldma_pm_uninit(md_ctrl);
1324 return -ENOMEM;
1325 }
1326
/* Rebuild the controller's late-init resources (rings/DMA buffers) by
 * releasing them and running late init again.
 */
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_late_init(md_ctrl);
}
1332
/* Full controller teardown, the reverse of init: stop the hardware first,
 * release the late-init ring/DMA resources, destroy the per-queue
 * workqueues, and finally unregister the PM entity.
 */
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_stop(md_ctrl);
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
}