0001
0002
0003
0004
0005
0006 #include <linux/list.h>
0007 #include "altera_utils.h"
0008 #include "altera_tse.h"
0009 #include "altera_sgdmahw.h"
0010 #include "altera_sgdma.h"
0011
/* Forward declarations for the descriptor helpers, the asynchronous
 * transfer starters, address translation, busy polling, and the
 * software TX/RX buffer queues implemented at the bottom of this file.
 */
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);
0054 int sgdma_initialize(struct altera_tse_private *priv)
0055 {
0056 priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
0057 SGDMA_CTRLREG_INTEN;
0058
0059 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
0060 SGDMA_CTRLREG_INTEN |
0061 SGDMA_CTRLREG_ILASTD;
0062
0063 INIT_LIST_HEAD(&priv->txlisthd);
0064 INIT_LIST_HEAD(&priv->rxlisthd);
0065
0066 priv->rxdescphys = (dma_addr_t) 0;
0067 priv->txdescphys = (dma_addr_t) 0;
0068
0069 priv->rxdescphys = dma_map_single(priv->device,
0070 (void __force *)priv->rx_dma_desc,
0071 priv->rxdescmem, DMA_BIDIRECTIONAL);
0072
0073 if (dma_mapping_error(priv->device, priv->rxdescphys)) {
0074 sgdma_uninitialize(priv);
0075 netdev_err(priv->dev, "error mapping rx descriptor memory\n");
0076 return -EINVAL;
0077 }
0078
0079 priv->txdescphys = dma_map_single(priv->device,
0080 (void __force *)priv->tx_dma_desc,
0081 priv->txdescmem, DMA_TO_DEVICE);
0082
0083 if (dma_mapping_error(priv->device, priv->txdescphys)) {
0084 sgdma_uninitialize(priv);
0085 netdev_err(priv->dev, "error mapping tx descriptor memory\n");
0086 return -EINVAL;
0087 }
0088
0089
0090 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
0091 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
0092
0093 dma_sync_single_for_device(priv->device, priv->txdescphys,
0094 priv->txdescmem, DMA_TO_DEVICE);
0095
0096 dma_sync_single_for_device(priv->device, priv->rxdescphys,
0097 priv->rxdescmem, DMA_TO_DEVICE);
0098
0099 return 0;
0100 }
0101
0102 void sgdma_uninitialize(struct altera_tse_private *priv)
0103 {
0104 if (priv->rxdescphys)
0105 dma_unmap_single(priv->device, priv->rxdescphys,
0106 priv->rxdescmem, DMA_BIDIRECTIONAL);
0107
0108 if (priv->txdescphys)
0109 dma_unmap_single(priv->device, priv->txdescphys,
0110 priv->txdescmem, DMA_TO_DEVICE);
0111 }
0112
0113
0114
0115
0116 void sgdma_reset(struct altera_tse_private *priv)
0117 {
0118
0119 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
0120 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
0121
0122 csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
0123 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
0124
0125 csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
0126 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
0127 }
0128
0129
0130
0131
0132
0133
/* Intentionally empty: the RX interrupt-enable bit is part of
 * priv->rxctrlreg (SGDMA_CTRLREG_INTEN set in sgdma_initialize()) and
 * is written when the engine is started in sgdma_async_read().
 */
void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}
0137
/* Intentionally empty: the TX interrupt-enable bit is part of
 * priv->txctrlreg (SGDMA_CTRLREG_INTEN set in sgdma_initialize()) and
 * is written when the engine is started in sgdma_async_write().
 */
void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}
0141
/* Intentionally empty: see sgdma_enable_rxirq() - interrupt enables
 * are managed through the control-register template at engine start.
 */
void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}
0145
/* Intentionally empty: see sgdma_enable_txirq() - interrupt enables
 * are managed through the control-register template at engine start.
 */
void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}
0149
/* Acknowledge a pending RX interrupt by setting the CLRINT bit in the
 * RX SGDMA control register.
 */
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}
0155
/* Acknowledge a pending TX interrupt by setting the CLRINT bit in the
 * TX SGDMA control register.
 */
void sgdma_clear_txirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}
0161
0162
0163
0164
0165
0166
/* Submit one transmit buffer to the SGDMA.
 *
 * Uses descriptor 0 of the TX descriptor memory, with descriptor 1 as
 * the chained "next" descriptor (its HW_OWNED bit is cleared by
 * sgdma_setup_descrip() so the engine stops after this transfer).
 *
 * Returns 1 when the transfer was started and the buffer queued on the
 * pending-TX list, 0 when the engine is still busy with a previous
 * transmit.
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];

	/* wait until the TX engine is free for the next request */
	if (sgdma_txbusy(priv))
		return 0;

	sgdma_setup_descrip(cdesc,			/* current descriptor */
			    ndesc,			/* next descriptor */
			    sgdma_txphysaddr(priv, ndesc),
			    buffer->dma_addr,		/* read addr: packet data */
			    0,				/* write addr unused for TX */
			    buffer->len,		/* packet length */
			    SGDMA_CONTROL_EOP,		/* generate EOP */
			    0,				/* read address not fixed */
			    SGDMA_CONTROL_WR_FIXED);	/* write address fixed */

	sgdma_async_write(priv, cdesc);

	/* remember the buffer so sgdma_tx_completions() can release it */
	queue_tx(priv, buffer);

	return 1;
}
0196
0197
0198
0199
0200 u32 sgdma_tx_completions(struct altera_tse_private *priv)
0201 {
0202 u32 ready = 0;
0203
0204 if (!sgdma_txbusy(priv) &&
0205 ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
0206 & SGDMA_CONTROL_HW_OWNED) == 0) &&
0207 (dequeue_tx(priv))) {
0208 ready = 1;
0209 }
0210
0211 return ready;
0212 }
0213
/* Kick off an asynchronous receive into the buffer at the head of the
 * RX queue (no-op if the engine is busy or the queue is empty).
 */
void sgdma_start_rxdma(struct altera_tse_private *priv)
{
	sgdma_async_read(priv);
}
0218
/* Append a receive buffer to the software RX queue; it is handed to
 * the hardware later by sgdma_async_read().
 */
void sgdma_add_rx_desc(struct altera_tse_private *priv,
		       struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
}
0224
0225
0226
0227
/* Check the RX SGDMA for a completed receive.
 *
 * On EOP, reads the packet status and transferred byte count out of
 * descriptor 0, dequeues the matching buffer, quiesces the engine and
 * restarts the next asynchronous receive.
 *
 * Returns (descriptor status without EOP) << 16 | byte count for a
 * completed packet, or 0 when nothing (valid) completed.
 */
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *base =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
	struct sgdma_descrip __iomem *desc = NULL;
	struct tse_buffer *rxbuffer = NULL;
	unsigned int rxstatus = 0;

	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

	desc = &base[0];
	if (sts & SGDMA_STSREG_EOP) {
		unsigned int pktlength = 0;
		unsigned int pktstatus = 0;
		/* pull the descriptor back from the device so the CPU
		 * sees the status/bytes_xferred fields the hardware wrote
		 */
		dma_sync_single_for_cpu(priv->device,
					priv->rxdescphys,
					SGDMA_DESC_LEN,
					DMA_FROM_DEVICE);

		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
		pktstatus = csrrd8(desc, sgdma_descroffs(status));
		/* pack status (minus EOP) into the upper 16 bits and the
		 * byte count into the lower 16 bits of the return value
		 */
		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
		rxstatus = rxstatus << 16;
		rxstatus |= (pktlength & 0xffff);

		if (rxstatus) {
			/* mark the descriptor consumed */
			csrwr8(0, desc, sgdma_descroffs(status));

			rxbuffer = dequeue_rx(priv);
			if (rxbuffer == NULL)
				netdev_info(priv->dev,
					    "sgdma rx and rx queue empty!\n");

			/* stop the engine and clear its status bits
			 * (NOTE(review): 0xf presumably write-1-clears the
			 * sticky status bits - confirm against the SGDMA
			 * register map) before restarting
			 */
			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));

			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

			/* arm the next receive */
			sgdma_async_read(priv);

		} else {
			/* EOP signalled but the descriptor carries neither
			 * status bits nor a byte count - log the raw
			 * values for diagnosis and return 0
			 */
			netdev_err(priv->dev,
				   "SGDMA RX Error Info: %x, %x, %x\n",
				   sts, csrrd8(desc, sgdma_descroffs(status)),
				   rxstatus);
		}
	} else if (sts == 0) {
		/* engine idle with nothing pending - kick off a receive */
		sgdma_async_read(priv);
	}

	return rxstatus;
}
0290
0291
0292
/* Fill in a single SGDMA descriptor through the byte/word MMIO
 * accessors.
 *
 * The next descriptor's control byte is rewritten first with
 * SGDMA_CONTROL_HW_OWNED cleared, so the hardware stops after
 * processing @desc instead of chaining on.  @desc then gets its read
 * and write addresses, next-descriptor pointer, control byte (HW_OWNED
 * plus the caller's EOP and fixed-address flags), cleared status and
 * burst fields, and the byte count.
 */
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed)
{
	/* release hardware ownership of the next descriptor so the
	 * chain terminates after this transfer
	 */
	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
	csrwr8(ctrl, ndesc, sgdma_descroffs(control));

	/* build this descriptor's control byte */
	ctrl = SGDMA_CONTROL_HW_OWNED;
	ctrl |= generate_eop;
	ctrl |= rfixed;
	ctrl |= wfixed;

	/* only the low 32 bits of the bus addresses are programmed */
	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

	csrwr32(0, desc, sgdma_descroffs(pad1));
	csrwr32(0, desc, sgdma_descroffs(pad2));
	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

	csrwr8(ctrl, desc, sgdma_descroffs(control));
	csrwr8(0, desc, sgdma_descroffs(status));
	csrwr8(0, desc, sgdma_descroffs(wburst));
	csrwr8(0, desc, sgdma_descroffs(rburst));
	csrwr16(length, desc, sgdma_descroffs(bytes));
	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}
0329
0330
0331
0332
0333
0334
0335
/* If the RX engine is idle, arm it with the buffer at the head of the
 * RX queue.  The buffer stays queued; sgdma_rx_status() dequeues it
 * when the receive completes.
 *
 * Returns 1 when a receive was started, 0 when the engine is busy or
 * no buffer is available.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];
	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL) {
			netdev_err(priv->dev, "no rx buffers available\n");
			return 0;
		}

		sgdma_setup_descrip(cdesc,	/* current descriptor */
				    ndesc,	/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,		/* read addr unused for RX */
				    rxbuffer->dma_addr, /* write addr */
				    0,		/* length 0 - presumably
						 * "receive until EOP";
						 * confirm against SGDMA doc
						 */
				    0,		/* no EOP generation */
				    0,		/* read addr not fixed */
				    0);		/* write addr not fixed */

		/* flush the descriptor writes before handing ownership to
		 * the hardware.  NOTE(review): only SGDMA_DESC_LEN (one
		 * descriptor) is synced although ndesc's control byte was
		 * also written - confirm this is intentional.
		 */
		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   SGDMA_DESC_LEN,
					   DMA_TO_DEVICE);

		/* point the engine at descriptor 0 and start it with the
		 * interrupt bits from rxctrlreg
		 */
		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			priv->rx_dma_csr,
			sgdma_csroffs(next_descrip));

		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			priv->rx_dma_csr,
			sgdma_csroffs(control));

		return 1;
	}

	return 0;
}
0380
/* Start an asynchronous transmit of the already-populated descriptor
 * @desc.  Returns 1 when the engine was started, 0 when it is busy.
 */
static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc)
{
	if (sgdma_txbusy(priv))
		return 0;

	/* clear the control register and the status bits
	 * (NOTE(review): 0x1f presumably write-1-clears sticky status -
	 * confirm against the SGDMA register map)
	 */
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

	/* flush the descriptor to the device before starting */
	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   SGDMA_DESC_LEN, DMA_TO_DEVICE);

	/* point the engine at @desc and start it with the interrupt
	 * bits from txctrlreg
	 */
	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		priv->tx_dma_csr,
		sgdma_csroffs(next_descrip));

	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
		priv->tx_dma_csr,
		sgdma_csroffs(control));

	return 1;
}
0404
0405 static dma_addr_t
0406 sgdma_txphysaddr(struct altera_tse_private *priv,
0407 struct sgdma_descrip __iomem *desc)
0408 {
0409 dma_addr_t paddr = priv->txdescmem_busaddr;
0410 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
0411 return (dma_addr_t)((uintptr_t)paddr + offs);
0412 }
0413
0414 static dma_addr_t
0415 sgdma_rxphysaddr(struct altera_tse_private *priv,
0416 struct sgdma_descrip __iomem *desc)
0417 {
0418 dma_addr_t paddr = priv->rxdescmem_busaddr;
0419 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
0420 return (dma_addr_t)((uintptr_t)paddr + offs);
0421 }
0422
/* Remove the first entry of @list into @entry (list_del_init'ed), or
 * set @entry to NULL when the list is empty.
 */
#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
			list_del_init(&entry->member);			\
		}							\
	} while (0)
0431
/* Set @entry to the first entry of @list without removing it, or to
 * NULL when the list is empty.
 */
#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
		}							\
	} while (0)
0439
0440
0441
0442
0443
/* Append a buffer to the tail of the pending-transmit list. */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}
0449
0450
0451
0452
0453
0454
/* Append a buffer to the tail of the available-receive list. */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}
0460
0461
0462
0463
0464
0465
0466 static struct tse_buffer *
0467 dequeue_tx(struct altera_tse_private *priv)
0468 {
0469 struct tse_buffer *buffer = NULL;
0470 list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
0471 return buffer;
0472 }
0473
0474
0475
0476
0477
0478
0479 static struct tse_buffer *
0480 dequeue_rx(struct altera_tse_private *priv)
0481 {
0482 struct tse_buffer *buffer = NULL;
0483 list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
0484 return buffer;
0485 }
0486
0487
0488
0489
0490
0491
0492
0493 static struct tse_buffer *
0494 queue_rx_peekhead(struct altera_tse_private *priv)
0495 {
0496 struct tse_buffer *buffer = NULL;
0497 list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
0498 return buffer;
0499 }
0500
0501
0502
0503 static int sgdma_rxbusy(struct altera_tse_private *priv)
0504 {
0505 return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
0506 & SGDMA_STSREG_BUSY;
0507 }
0508
0509
0510
0511
/* Poll the TX SGDMA busy bit for up to ~100us (100 x udelay(1)).
 * Returns 0 once the engine is idle, or 1 (after logging an error)
 * when it is still busy after the timeout.
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;

	/* if the engine is busy, wait for the current transaction */
	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
		& SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
	    & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}