// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * Queue Management Unit (QMU) is designed to unload SW effort
 * to serve DMA interrupts.
 * By preparing General Purpose Descriptor (GPD) and Buffer Descriptor (BD),
 * SW links data buffers and triggers QMU to send / receive data to
 * host / from device at a time.
 * And now only GPD is supported.
 *
 * For more detailed information, please refer to QMU Programming Guide
 * V0.96.
 */

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_ZLP	BIT(6)
#define GPD_FLAGS_IOC	BIT(7)
#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)
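
/*
 * the _EL macro variants, selected when mtu->gen2cp is set, use wider
 * length fields and shifted extension bits than the legacy _OG ones
 */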
#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})

#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})

#define GPD_EXT_FLAG_ZLP	BIT(29)
#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})

#define GPD_EXT_BUF(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})

#define HILO_GEN64(hi, lo)	(((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
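
/*
 * the current GPD address of a queue is split across two registers:
 * TQCPR/RQCPR hold the lower 32 bits, TQHIAR/RQHIAR the upper bits
 */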
static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}
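
/*
 * gpd_dma_to_virt()/gpd_virt_to_dma() translate a GPD address between
 * the DMA address seen by QMU and its virtual address within the ring
 */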
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}
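
/* clear HWO of the start GPD and reset the ring's enqueue/dequeue pointers */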
static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
		gpd_ring_init(ring, gpd);
	}
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all gpds as default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
			ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}
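
/* resume a stopped queue; retry the RESUME write once if the queue stays inactive */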
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}
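
/* advance the enqueue/dequeue pointer, wrapping around at the end of the ring */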
static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/*
 * despite its name, this returns true when enqueue's next slot meets
 * dequeue, i.e. when the ring has no room for another GPD
 */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}
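
/* a nonzero return means no free GPD slot is left, so the caller must not queue a new request */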
int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}
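
/*
 * fill a TX GPD: point it at the request buffer, store the data length in
 * dw3_info, link it to the next (pre-allocated) GPD, and finally hand it
 * over to the hardware by setting HWO after a barrier
 */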
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW own it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	/* the high address bits live in dw3_info together with the data length */
	gpd->dw3_info |= cpu_to_le32(ext_addr);

	if (req->zero) {
		if (mtu->gen2cp)
			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
		else
			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
	}

	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}
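
/*
 * an RX GPD differs from TX: the buffer length limit is stored in
 * dw0_info, while dw3_info later carries the actually received length
 */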
static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW own it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw3_info = cpu_to_le32(ext_addr);

	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}
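
/* prepare a GPD for the request and hand its ownership to the hardware */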
void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}
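
/*
 * program the ring's start address, enable the endpoint's DMA request,
 * unmask the queue's error interrupts, then kick the queue with Q_START
 * unless it is already active
 */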
int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
			QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to next GPD when receiving ZLP */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
			QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}
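
/* stop the queue and poll (up to 1ms) until the hardware deactivates it */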
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop QMU, then reclaim all GPDs for software */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}

/*
 * QMU can't transfer a zero-length packet directly: a request of length
 * 0 produces a GPD with data length 0, which intentionally triggers the
 * TX length error interrupt, and this handler then sends the ZLP by hand
 * in bypass (BPS) mode and resumes the queue.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length != 0)
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
	trace_mtu3_zlp_exp_gpd(mep, gpd_current);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet(meanwhile, the second request is transferred,
 * and the tasklet processes both of them) --> qmu_interrupt for the second
 * one. To avoid this, qmu_done_tx is called directly from the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/* translate the current GPD's DMA address got from the QMU register */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
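
/* give back completed RX requests up to the GPD being processed; mirrors qmu_done_tx() */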
static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
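
/*
 * TX queues report done status in the low 16 bits, RX queues in the high
 * 16 bits; EP0 does not use QMU, so scanning starts from EP1
 */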
static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}
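
/*
 * handle per-endpoint checksum, length and ZLP errors; a TX length error
 * is the intentional trigger used to send a ZLP, see
 * qmu_tx_zlp_error_handler() above
 */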
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}
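
/* handle completed GPDs first, then any queue exceptions */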
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read-update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);
	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}
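
/*
 * create the DMA pool backing all GPD rings; each ring holds MAX_GPD_NUM
 * GPDs of QMU_GPD_SIZE (16) bytes. As a sketch of the typical call flow:
 * mtu3_qmu_init() at controller init, mtu3_gpd_ring_alloc() per endpoint,
 * mtu3_insert_gpd() plus mtu3_qmu_start()/mtu3_qmu_resume() per request,
 * and mtu3_qmu_stop()/mtu3_qmu_flush() when the endpoint is disabled.
 */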
int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}