// SPDX-License-Identifier: GPL-2.0-only
/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

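/* Descriptor and CSR fields below are accessed through the csrrd*()/csrwr*()
 * helpers rather than plain dereferences, since both live in __iomem space.
 * Each direction (rx and tx) uses a two-entry descriptor chain: descriptor 0
 * describes the current transfer, and descriptor 1 serves as the "next" link
 * handed to the hardware but never marked hardware-owned.
 */
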
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
                struct sgdma_descrip __iomem *ndesc,
                dma_addr_t ndesc_phys,
                dma_addr_t raddr,
                dma_addr_t waddr,
                u16 length,
                int generate_eop,
                int rfixed,
                int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
                  struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
         struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
         struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

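/* Maps the rx and tx descriptor regions for streaming DMA and zeroes them.
 * Returns 0 on success, or -EINVAL if a mapping fails (after unwinding any
 * mapping already made via sgdma_uninitialize()).
 */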
int sgdma_initialize(struct altera_tse_private *priv)
{
    priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
              SGDMA_CTRLREG_INTEN;

    priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
              SGDMA_CTRLREG_INTEN |
              SGDMA_CTRLREG_ILASTD;

    INIT_LIST_HEAD(&priv->txlisthd);
    INIT_LIST_HEAD(&priv->rxlisthd);

    priv->rxdescphys = (dma_addr_t) 0;
    priv->txdescphys = (dma_addr_t) 0;

    priv->rxdescphys = dma_map_single(priv->device,
                      (void __force *)priv->rx_dma_desc,
                      priv->rxdescmem, DMA_BIDIRECTIONAL);

    if (dma_mapping_error(priv->device, priv->rxdescphys)) {
        sgdma_uninitialize(priv);
        netdev_err(priv->dev, "error mapping rx descriptor memory\n");
        return -EINVAL;
    }

    priv->txdescphys = dma_map_single(priv->device,
                      (void __force *)priv->tx_dma_desc,
                      priv->txdescmem, DMA_TO_DEVICE);

    if (dma_mapping_error(priv->device, priv->txdescphys)) {
        sgdma_uninitialize(priv);
        netdev_err(priv->dev, "error mapping tx descriptor memory\n");
        return -EINVAL;
    }

    /* Initialize descriptor memory to all 0's, then sync it out to the device */
    memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
    memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

    dma_sync_single_for_device(priv->device, priv->txdescphys,
                   priv->txdescmem, DMA_TO_DEVICE);

    dma_sync_single_for_device(priv->device, priv->rxdescphys,
                   priv->rxdescmem, DMA_TO_DEVICE);

    return 0;
}

void sgdma_uninitialize(struct altera_tse_private *priv)
{
    if (priv->rxdescphys)
        dma_unmap_single(priv->device, priv->rxdescphys,
                 priv->rxdescmem, DMA_BIDIRECTIONAL);

    if (priv->txdescphys)
        dma_unmap_single(priv->device, priv->txdescphys,
                 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
    /* Initialize descriptor memory to 0 */
    memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
    memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

    csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
    csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

    csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
    csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}

/* For SGDMA, interrupts remain enabled once initially enabled, so the
 * abstract enable and disable hooks need no implementation.
 */

void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
    tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
            SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
    tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
            SGDMA_CTRLREG_CLRINT);
}

/* Transmits a buffer through the SGDMA. Returns the number of buffers
 * transmitted (1), or 0 if the transmit could not be started.
 *
 * tx_lock is held by the caller.
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
    struct sgdma_descrip __iomem *descbase =
        (struct sgdma_descrip __iomem *)priv->tx_dma_desc;

    struct sgdma_descrip __iomem *cdesc = &descbase[0];
    struct sgdma_descrip __iomem *ndesc = &descbase[1];

    /* wait 'til the tx sgdma is ready for the next transmit request */
    if (sgdma_txbusy(priv))
        return 0;

    sgdma_setup_descrip(cdesc,          /* current descriptor */
                ndesc,          /* next descriptor */
                sgdma_txphysaddr(priv, ndesc),
                buffer->dma_addr,       /* address of packet to xmit */
                0,              /* write addr 0 for tx dma */
                buffer->len,        /* length of packet */
                SGDMA_CONTROL_EOP,      /* Generate EOP */
                0,              /* read fixed */
                SGDMA_CONTROL_WR_FIXED);    /* write fixed */

    sgdma_async_write(priv, cdesc);

    /* enqueue the request to the pending transmit queue */
    queue_tx(priv, buffer);

    return 1;
}
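
/* Since sgdma_tx_buffer() only ever uses descriptor 0 and returns 0 while
 * the DMA is still busy, at most one transmit is in flight at a time.
 */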


/* tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
    u32 ready = 0;

    if (!sgdma_txbusy(priv) &&
        ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
         & SGDMA_CONTROL_HW_OWNED) == 0) &&
        (dequeue_tx(priv))) {
        ready = 1;
    }

    return ready;
}

void sgdma_start_rxdma(struct altera_tse_private *priv)
{
    sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
               struct tse_buffer *rxbuffer)
{
    queue_rx(priv, rxbuffer);
}

/* Status is returned in the upper 16 bits,
 * length is returned in the lower 16 bits.
 */
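/* For example, a 1514-byte frame received with no error bits set (EOP is
 * masked off below) yields rxstatus = (0 << 16) | 0x05ea = 0x000005ea.
 */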
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
    struct sgdma_descrip __iomem *base =
        (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
    struct sgdma_descrip __iomem *desc = NULL;
    struct tse_buffer *rxbuffer = NULL;
    unsigned int rxstatus = 0;

    u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

    desc = &base[0];
    if (sts & SGDMA_STSREG_EOP) {
        unsigned int pktlength = 0;
        unsigned int pktstatus = 0;
        dma_sync_single_for_cpu(priv->device,
                    priv->rxdescphys,
                    SGDMA_DESC_LEN,
                    DMA_FROM_DEVICE);

        pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
        pktstatus = csrrd8(desc, sgdma_descroffs(status));
        rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
        rxstatus = rxstatus << 16;
        rxstatus |= (pktlength & 0xffff);

        if (rxstatus) {
            csrwr8(0, desc, sgdma_descroffs(status));

            rxbuffer = dequeue_rx(priv);
            if (rxbuffer == NULL)
                netdev_info(priv->dev,
                        "sgdma rx and rx queue empty!\n");

            /* Clear control */
            csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
            /* clear status */
            csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

            /* kick the rx sgdma after reaping this descriptor */
            sgdma_async_read(priv);

        } else {
            /* If the SGDMA indicated end-of-packet on receive, the
             * rxstatus from the descriptor is expected to be non-zero,
             * meaning either a valid packet with a non-zero length or
             * an indicated error. If it is zero, all we can do is
             * signal an error and report no packet received. Most
             * likely there is a system design error, or an error in
             * the underlying kernel (a cache or cache-management
             * problem).
             */
            netdev_err(priv->dev,
                   "SGDMA RX Error Info: %x, %x, %x\n",
                   sts, csrrd8(desc, sgdma_descroffs(status)),
                   rxstatus);
        }
    } else if (sts == 0) {
        sgdma_async_read(priv);
    }

    return rxstatus;
}


/* Private functions */
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
                struct sgdma_descrip __iomem *ndesc,
                dma_addr_t ndesc_phys,
                dma_addr_t raddr,
                dma_addr_t waddr,
                u16 length,
                int generate_eop,
                int rfixed,
                int wfixed)
{
    /* Clear the next descriptor as not owned by hardware */

    u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
    ctrl &= ~SGDMA_CONTROL_HW_OWNED;
    csrwr8(ctrl, ndesc, sgdma_descroffs(control));

    ctrl = SGDMA_CONTROL_HW_OWNED;
    ctrl |= generate_eop;
    ctrl |= rfixed;
    ctrl |= wfixed;

    /* Channel is implicitly zero, initialized to 0 by default */
    csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
    csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

    csrwr32(0, desc, sgdma_descroffs(pad1));
    csrwr32(0, desc, sgdma_descroffs(pad2));
    csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

    csrwr8(ctrl, desc, sgdma_descroffs(control));
    csrwr8(0, desc, sgdma_descroffs(status));
    csrwr8(0, desc, sgdma_descroffs(wburst));
    csrwr8(0, desc, sgdma_descroffs(rburst));
    csrwr16(length, desc, sgdma_descroffs(bytes));
    csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}
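
/* Note that writing the descriptor fields above does not by itself start a
 * transfer; the SGDMA only consumes the descriptor once sgdma_async_read()
 * or sgdma_async_write() writes SGDMA_CTRLREG_START (along with the
 * descriptor's bus address) to the control CSR.
 */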

/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning the initial state - restart the
 * async read, most likely for the first time when populating a receive
 * buffer. If the status register indicates not busy but has completion
 * status set, restart the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
    struct sgdma_descrip __iomem *descbase =
        (struct sgdma_descrip __iomem *)priv->rx_dma_desc;

    struct sgdma_descrip __iomem *cdesc = &descbase[0];
    struct sgdma_descrip __iomem *ndesc = &descbase[1];
    struct tse_buffer *rxbuffer = NULL;

    if (!sgdma_rxbusy(priv)) {
        rxbuffer = queue_rx_peekhead(priv);
        if (rxbuffer == NULL) {
            netdev_err(priv->dev, "no rx buffers available\n");
            return 0;
        }

        sgdma_setup_descrip(cdesc,      /* current descriptor */
                    ndesc,      /* next descriptor */
                    sgdma_rxphysaddr(priv, ndesc),
                    0,          /* read addr 0 for rx dma */
                    rxbuffer->dma_addr, /* write addr for rx dma */
                    0,          /* read 'til EOP */
                    0,          /* EOP: NA for rx dma */
                    0,          /* read fixed: NA for rx dma */
                    0);         /* SOP: NA for rx DMA */

        dma_sync_single_for_device(priv->device,
                       priv->rxdescphys,
                       SGDMA_DESC_LEN,
                       DMA_TO_DEVICE);

        csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
            priv->rx_dma_csr,
            sgdma_csroffs(next_descrip));

        csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
            priv->rx_dma_csr,
            sgdma_csroffs(control));

        return 1;
    }

    return 0;
}

static int sgdma_async_write(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc)
{
    if (sgdma_txbusy(priv))
        return 0;

    /* clear control and status */
    csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
    csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

    dma_sync_single_for_device(priv->device, priv->txdescphys,
                   SGDMA_DESC_LEN, DMA_TO_DEVICE);

    csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
        priv->tx_dma_csr,
        sgdma_csroffs(next_descrip));

    csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
        priv->tx_dma_csr,
        sgdma_csroffs(control));

    return 1;
}

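/* The descriptor region is visible at two addresses: the CPU mapping
 * (priv->tx_dma_desc / priv->rx_dma_desc) and the bus address the SGDMA
 * itself uses (priv->txdescmem_busaddr / priv->rxdescmem_busaddr). The two
 * helpers below translate a CPU-side descriptor pointer into the matching
 * bus address by carrying the descriptor's offset over to the bus region.
 */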
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
         struct sgdma_descrip __iomem *desc)
{
    dma_addr_t paddr = priv->txdescmem_busaddr;
    uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
    return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
         struct sgdma_descrip __iomem *desc)
{
    dma_addr_t paddr = priv->rxdescmem_busaddr;
    uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
    return (dma_addr_t)((uintptr_t)paddr + offs);
}

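/* Minimal head-of-list helpers: both set 'entry' to NULL when the list is
 * empty; list_remove_head() also unlinks the entry from the list. Typical
 * use, as in dequeue_tx() below:
 *
 *     list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
 */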
#define list_remove_head(list, entry, type, member)         \
    do {                                \
        entry = NULL;                       \
        if (!list_empty(list)) {                \
            entry = list_entry((list)->next, type, member); \
            list_del_init(&entry->member);          \
        }                           \
    } while (0)

#define list_peek_head(list, entry, type, member)           \
    do {                                \
        entry = NULL;                       \
        if (!list_empty(list)) {                \
            entry = list_entry((list)->next, type, member); \
        }                           \
    } while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
    list_add_tail(&buffer->lh, &priv->txlisthd);
}


/* adds a tse_buffer to the tail of a rx buffer list
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
    list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, or returns
 * NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
    struct tse_buffer *buffer = NULL;
    list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
    return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, or returns
 * NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
    struct tse_buffer *buffer = NULL;
    list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
    return buffer;
}

/* returns the head of the receive buffer list without removing it,
 * or NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
    struct tse_buffer *buffer = NULL;
    list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
    return buffer;
}

/* checks and returns the rx sgdma busy status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
    return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
               & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation; returns 0
 * when it transitions to non-busy, or 1 if the wait (roughly 100 us
 * of polling) times out.
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
    int delay = 0;

    /* if DMA is busy, wait for current transaction to finish */
    while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
        & SGDMA_STSREG_BUSY) && (delay++ < 100))
        udelay(1);

    if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
        & SGDMA_STSREG_BUSY) {
        netdev_err(priv->dev, "timeout waiting for tx dma\n");
        return 1;
    }
    return 0;
}