Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2021, MediaTek Inc.
0004  * Copyright (c) 2021-2022, Intel Corporation.
0005  *
0006  * Authors:
0007  *  Amir Hanania <amir.hanania@intel.com>
0008  *  Haijun Liu <haijun.liu@mediatek.com>
0009  *  Moises Veleta <moises.veleta@intel.com>
0010  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
0011  *
0012  * Contributors:
0013  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
0014  *  Eliot Lee <eliot.lee@intel.com>
0015  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
0016  */
0017 
0018 #include <linux/device.h>
0019 #include <linux/gfp.h>
0020 #include <linux/irqreturn.h>
0021 #include <linux/kernel.h>
0022 #include <linux/list.h>
0023 #include <linux/string.h>
0024 #include <linux/wait.h>
0025 #include <linux/workqueue.h>
0026 
0027 #include "t7xx_dpmaif.h"
0028 #include "t7xx_hif_dpmaif.h"
0029 #include "t7xx_hif_dpmaif_rx.h"
0030 #include "t7xx_hif_dpmaif_tx.h"
0031 #include "t7xx_pci.h"
0032 #include "t7xx_pcie_mac.h"
0033 #include "t7xx_state_monitor.h"
0034 
/* Advance a ring-buffer index by one slot, wrapping back to slot 0 when the
 * end of the ring (@buf_len) is reached.
 */
unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx)
{
	buf_idx++;
	if (buf_idx < buf_len)
		return buf_idx;

	return 0;
}
0041 
0042 unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
0043                        unsigned int wr_idx, enum dpmaif_rdwr rd_wr)
0044 {
0045     int pkt_cnt;
0046 
0047     if (rd_wr == DPMAIF_READ)
0048         pkt_cnt = wr_idx - rd_idx;
0049     else
0050         pkt_cnt = rd_idx - wr_idx - 1;
0051 
0052     if (pkt_cnt < 0)
0053         pkt_cnt += total_cnt;
0054 
0055     return (unsigned int)pkt_cnt;
0056 }
0057 
0058 static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
0059 {
0060     struct dpmaif_isr_para *isr_para;
0061     int i;
0062 
0063     for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
0064         isr_para = &dpmaif_ctrl->isr_para[i];
0065         t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
0066     }
0067 }
0068 
0069 static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
0070 {
0071     struct dpmaif_isr_para *isr_para;
0072     int i;
0073 
0074     for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
0075         isr_para = &dpmaif_ctrl->isr_para[i];
0076         t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
0077     }
0078 }
0079 
/* Drain and dispatch all pending DPMAIF interrupt events for one DL queue.
 * Reads the HW interrupt status via t7xx_dpmaif_hw_get_intr_cnt(), acks the
 * PCIe-level status, then handles each reported event in order.
 */
static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para)
{
	struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
	struct dpmaif_hw_intr_st_para intr_status;
	struct device *dev = dpmaif_ctrl->dev;
	struct dpmaif_hw_info *hw_info;
	int i;

	memset(&intr_status, 0, sizeof(intr_status));
	hw_info = &dpmaif_ctrl->hw_info;

	/* Collect the pending interrupt types/queues for this DL queue ID. */
	if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) {
		dev_err(dev, "Failed to get HW interrupt count\n");
		return;
	}

	/* Ack the PCIe-level status only after the HW status was latched above. */
	t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);

	for (i = 0; i < intr_status.intr_cnt; i++) {
		switch (intr_status.intr_types[i]) {
		case DPF_INTR_UL_DONE:
			/* TX completion: reap finished DRB entries for the queue. */
			t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
			break;

		case DPF_INTR_UL_DRB_EMPTY:
		case DPF_INTR_UL_MD_NOTREADY:
		case DPF_INTR_UL_MD_PWR_NOTREADY:
			/* No need to log an error for these */
			break;

		/* The four *CNT_LEN_ERR cases below log (rate-limited, since the
		 * condition can fire repeatedly) and re-enable the masked source.
		 */
		case DPF_INTR_DL_BATCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n");
			t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info);
			break;

		case DPF_INTR_DL_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n");
			t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info);
			break;

		case DPF_INTR_DL_Q0_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n");
			t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT);
			break;

		case DPF_INTR_DL_Q1_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n");
			t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1);
			break;

		case DPF_INTR_DL_DONE:
		case DPF_INTR_DL_Q0_DONE:
		case DPF_INTR_DL_Q1_DONE:
			/* RX completion: hand the queue to the RX path. */
			t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
			break;

		default:
			dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n",
					    intr_status.intr_types[i]);
		}
	}
}
0142 
0143 static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
0144 {
0145     struct dpmaif_isr_para *isr_para = data;
0146     struct dpmaif_ctrl *dpmaif_ctrl;
0147 
0148     dpmaif_ctrl = isr_para->dpmaif_ctrl;
0149     if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
0150         dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n");
0151         return IRQ_HANDLED;
0152     }
0153 
0154     t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
0155     t7xx_dpmaif_irq_cb(isr_para);
0156     t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
0157     return IRQ_HANDLED;
0158 }
0159 
0160 static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl)
0161 {
0162     struct dpmaif_isr_para *isr_para;
0163     unsigned char i;
0164 
0165     dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT;
0166     dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT;
0167 
0168     for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
0169         isr_para = &dpmaif_ctrl->isr_para[i];
0170         isr_para->dpmaif_ctrl = dpmaif_ctrl;
0171         isr_para->dlq_id = i;
0172         isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i];
0173     }
0174 }
0175 
0176 static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
0177 {
0178     struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev;
0179     struct dpmaif_isr_para *isr_para;
0180     enum t7xx_int int_type;
0181     int i;
0182 
0183     t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl);
0184 
0185     for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
0186         isr_para = &dpmaif_ctrl->isr_para[i];
0187         int_type = isr_para->pcie_int;
0188         t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
0189 
0190         t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
0191         t7xx_dev->intr_thread[int_type] = NULL;
0192         t7xx_dev->callback_param[int_type] = isr_para;
0193 
0194         t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
0195         t7xx_pcie_mac_set_int(t7xx_dev, int_type);
0196     }
0197 }
0198 
/* Allocate all DPMAIF software resources: normal and fragment BAT tables,
 * all RX and TX queue structures, the TX thread and the BAT release workqueue.
 * Returns 0 on success or a negative errno; on failure everything allocated
 * so far is unwound via the fall-through goto chain below.
 */
static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_rx_queue *rx_q;
	struct dpmaif_tx_queue *tx_q;
	int ret, rx_idx, tx_idx, i;

	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret);
		return ret;
	}

	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret);
		goto err_free_normal_bat;
	}

	for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) {
		rx_q = &dpmaif_ctrl->rxq[rx_idx];
		rx_q->index = rx_idx;
		rx_q->dpmaif_ctrl = dpmaif_ctrl;
		ret = t7xx_dpmaif_rxq_init(rx_q);
		if (ret)
			goto err_free_rxq;
	}

	for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) {
		tx_q = &dpmaif_ctrl->txq[tx_idx];
		tx_q->index = tx_idx;
		tx_q->dpmaif_ctrl = dpmaif_ctrl;
		ret = t7xx_dpmaif_txq_init(tx_q);
		if (ret)
			goto err_free_txq;
	}

	ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n");
		goto err_free_txq;
	}

	ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl);
	if (ret)
		goto err_thread_rel;

	return 0;

	/* Labels intentionally fall through: each entry point frees what was
	 * allocated up to that failure, then continues into the earlier steps.
	 */
err_thread_rel:
	t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);

err_free_txq:
	/* Free only the TX queues initialized before the failure (i < tx_idx). */
	for (i = 0; i < tx_idx; i++) {
		tx_q = &dpmaif_ctrl->txq[i];
		t7xx_dpmaif_txq_free(tx_q);
	}

err_free_rxq:
	/* When reached by fall-through, rx_idx == DPMAIF_RXQ_NUM: all RX queues
	 * were initialized and all are freed here.
	 */
	for (i = 0; i < rx_idx; i++) {
		rx_q = &dpmaif_ctrl->rxq[i];
		t7xx_dpmaif_rxq_free(rx_q);
	}

	t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag);

err_free_normal_bat:
	t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);

	return ret;
}
0269 
0270 static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl)
0271 {
0272     struct dpmaif_rx_queue *rx_q;
0273     struct dpmaif_tx_queue *tx_q;
0274     int i;
0275 
0276     t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
0277     t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl);
0278 
0279     for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
0280         tx_q = &dpmaif_ctrl->txq[i];
0281         t7xx_dpmaif_txq_free(tx_q);
0282     }
0283 
0284     for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
0285         rx_q = &dpmaif_ctrl->rxq[i];
0286         t7xx_dpmaif_rxq_free(rx_q);
0287     }
0288 }
0289 
/* Bring up the DPMAIF data path: populate the HW init parameters from the
 * per-queue SW state, pre-fill the RX normal and fragment buffers, initialize
 * the HW, push the initial BAT/frag counts and finally enable interrupts and
 * wake the TX thread. Returns 0 on success or a negative errno.
 */
static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
	struct dpmaif_hw_params hw_init_para;
	struct dpmaif_rx_queue *rxq;
	struct dpmaif_tx_queue *txq;
	unsigned int buf_cnt;
	int i, ret = 0;

	/* Already powered on: nothing to (re)start. */
	if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)
		return -EFAULT;

	memset(&hw_init_para, 0, sizeof(hw_init_para));

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		rxq = &dpmaif_ctrl->rxq[i];
		rxq->que_started = true;
		rxq->index = i;
		/* Keep one slot free so the ring never looks empty when full. */
		rxq->budget = rxq->bat_req->bat_size_cnt - 1;

		hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr;
		hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt;
		hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr;
		hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt;
		hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr;
		hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt;
	}

	/* Fill all-but-one normal BAT entries with fresh RX buffers. */
	bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt);
	buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1;
	ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret);
		return ret;
	}

	/* Same for the fragment BAT ring. */
	buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1;
	ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret);
		goto err_free_normal_bat;
	}

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		txq = &dpmaif_ctrl->txq[i];
		txq->que_started = true;

		hw_init_para.drb_base_addr[i] = txq->drb_bus_addr;
		hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt;
	}

	ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret);
		goto err_free_frag_bat;
	}

	/* NOTE(review): rxq still points at the last RX queue from the loop
	 * above; these calls and the error labels below assume every rxq shares
	 * dpmaif_ctrl's bat_req/bat_frag tables — confirm against rxq_init.
	 */
	ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1);
	if (ret)
		goto err_free_frag_bat;

	ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1);
	if (ret)
		goto err_free_frag_bat;

	t7xx_dpmaif_ul_clr_all_intr(hw_info);
	t7xx_dpmaif_dl_clr_all_intr(hw_info);
	dpmaif_ctrl->state = DPMAIF_STATE_PWRON;
	t7xx_dpmaif_enable_irq(dpmaif_ctrl);
	/* Kick the TX thread now that the queues are live. */
	wake_up(&dpmaif_ctrl->tx_wq);
	return 0;

err_free_frag_bat:
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);

err_free_normal_bat:
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);

	return ret;
}
0370 
/* Stop the software data path: TX first so no new work is queued, then RX. */
static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl)
{
	t7xx_dpmaif_tx_stop(dpmaif_ctrl);
	t7xx_dpmaif_rx_stop(dpmaif_ctrl);
}
0376 
0377 static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl)
0378 {
0379     t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
0380     t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
0381 }
0382 
0383 static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl)
0384 {
0385     if (!dpmaif_ctrl->dpmaif_sw_init_done) {
0386         dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n");
0387         return -EFAULT;
0388     }
0389 
0390     if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF)
0391         return -EFAULT;
0392 
0393     t7xx_dpmaif_disable_irq(dpmaif_ctrl);
0394     dpmaif_ctrl->state = DPMAIF_STATE_PWROFF;
0395     t7xx_dpmaif_stop_sw(dpmaif_ctrl);
0396     t7xx_dpmaif_tx_clear(dpmaif_ctrl);
0397     t7xx_dpmaif_rx_clear(dpmaif_ctrl);
0398     return 0;
0399 }
0400 
0401 static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
0402 {
0403     struct dpmaif_ctrl *dpmaif_ctrl = param;
0404 
0405     t7xx_dpmaif_tx_stop(dpmaif_ctrl);
0406     t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
0407     t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
0408     t7xx_dpmaif_disable_irq(dpmaif_ctrl);
0409     t7xx_dpmaif_rx_stop(dpmaif_ctrl);
0410     return 0;
0411 }
0412 
0413 static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
0414 {
0415     int qno;
0416 
0417     for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
0418         t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
0419 }
0420 
0421 static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
0422 {
0423     struct dpmaif_rx_queue *rxq;
0424     struct dpmaif_tx_queue *txq;
0425     unsigned int que_cnt;
0426 
0427     for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
0428         txq = &dpmaif_ctrl->txq[que_cnt];
0429         txq->que_started = true;
0430     }
0431 
0432     for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
0433         rxq = &dpmaif_ctrl->rxq[que_cnt];
0434         rxq->que_started = true;
0435     }
0436 }
0437 
0438 static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
0439 {
0440     struct dpmaif_ctrl *dpmaif_ctrl = param;
0441 
0442     if (!dpmaif_ctrl)
0443         return 0;
0444 
0445     t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
0446     t7xx_dpmaif_enable_irq(dpmaif_ctrl);
0447     t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
0448     t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
0449     wake_up(&dpmaif_ctrl->tx_wq);
0450     return 0;
0451 }
0452 
0453 static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
0454 {
0455     struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
0456     int ret;
0457 
0458     INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
0459     dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
0460     dpmaif_pm_entity->suspend_late = NULL;
0461     dpmaif_pm_entity->resume_early = NULL;
0462     dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
0463     dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
0464     dpmaif_pm_entity->entity_param = dpmaif_ctrl;
0465 
0466     ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
0467     if (ret)
0468         dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
0469 
0470     return ret;
0471 }
0472 
0473 static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
0474 {
0475     struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
0476     int ret;
0477 
0478     ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
0479     if (ret < 0)
0480         dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
0481 
0482     return ret;
0483 }
0484 
0485 int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
0486 {
0487     int ret = 0;
0488 
0489     switch (state) {
0490     case MD_STATE_WAITING_FOR_HS1:
0491         ret = t7xx_dpmaif_start(dpmaif_ctrl);
0492         break;
0493 
0494     case MD_STATE_EXCEPTION:
0495         ret = t7xx_dpmaif_stop(dpmaif_ctrl);
0496         break;
0497 
0498     case MD_STATE_STOPPED:
0499         ret = t7xx_dpmaif_stop(dpmaif_ctrl);
0500         break;
0501 
0502     case MD_STATE_WAITING_TO_STOP:
0503         t7xx_dpmaif_stop_hw(dpmaif_ctrl);
0504         break;
0505 
0506     default:
0507         break;
0508     }
0509 
0510     return ret;
0511 }
0512 
0513 /**
0514  * t7xx_dpmaif_hif_init() - Initialize data path.
0515  * @t7xx_dev: MTK context structure.
0516  * @callbacks: Callbacks implemented by the network layer to handle RX skb and
0517  *         event notifications.
0518  *
0519  * Allocate and initialize datapath control block.
0520  * Register datapath ISR, TX and RX resources.
0521  *
0522  * Return:
0523  * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure.
0524  * * NULL        - In case of error.
0525  */
0526 struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
0527                      struct dpmaif_callbacks *callbacks)
0528 {
0529     struct device *dev = &t7xx_dev->pdev->dev;
0530     struct dpmaif_ctrl *dpmaif_ctrl;
0531     int ret;
0532 
0533     if (!callbacks)
0534         return NULL;
0535 
0536     dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL);
0537     if (!dpmaif_ctrl)
0538         return NULL;
0539 
0540     dpmaif_ctrl->t7xx_dev = t7xx_dev;
0541     dpmaif_ctrl->callbacks = callbacks;
0542     dpmaif_ctrl->dev = dev;
0543     dpmaif_ctrl->dpmaif_sw_init_done = false;
0544     dpmaif_ctrl->hw_info.dev = dev;
0545     dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
0546                      t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
0547 
0548     ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl);
0549     if (ret)
0550         return NULL;
0551 
0552     t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
0553     t7xx_dpmaif_disable_irq(dpmaif_ctrl);
0554 
0555     ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
0556     if (ret) {
0557         t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
0558         dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
0559         return NULL;
0560     }
0561 
0562     dpmaif_ctrl->dpmaif_sw_init_done = true;
0563     return dpmaif_ctrl;
0564 }
0565 
0566 void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl)
0567 {
0568     if (dpmaif_ctrl->dpmaif_sw_init_done) {
0569         t7xx_dpmaif_stop(dpmaif_ctrl);
0570         t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
0571         t7xx_dpmaif_sw_release(dpmaif_ctrl);
0572         dpmaif_ctrl->dpmaif_sw_init_done = false;
0573     }
0574 }