/*
 * DMA-based transfer support for the Synopsys DesignWare (DW) SPI controller
 * driver: generic DMA-engine channel handling plus the Intel Medfield
 * specific DW DMA controller setup.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define DW_SPI_RX_BUSY		0
#define DW_SPI_RX_BURST_LEVEL	16
#define DW_SPI_TX_BUSY		1
#define DW_SPI_TX_BURST_LEVEL	16

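/*
 * DMA channel filter used on the Medfield path: accept only the channels
 * provided by the DW DMA controller found on the PCI bus and hand the
 * dw_dma_slave data over to the DMA driver via chan->private.
 */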
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

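/*
 * Pick the Rx/Tx burst lengths from the DMA channel capabilities (falling
 * back to the 16-word defaults) and program the SPI controller DMA request
 * thresholds accordingly.
 */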
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_TX_BURST_LEVEL;

	/*
	 * Even with the Rx DMA channel prioritized over Tx, the Tx channel
	 * may occasionally be serviced faster than Rx, which at high bus
	 * speeds can overflow the SPI Rx FIFO before the Rx DMA channel has
	 * drained it. Intentionally slow the Tx side down by keeping the Tx
	 * burst level at no more than half of the FIFO depth.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

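/*
 * Cache the smallest max_sg_burst capability of the two channels. A value of
 * zero means the DMA engine puts no limit on the number of SG entries it can
 * handle in one go.
 */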
static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx = {0}, rx = {0};

	dma_get_slave_caps(dws->txchan, &tx);
	dma_get_slave_caps(dws->rxchan, &rx);

	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;
}

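/*
 * Intel Medfield specific initialization: request the Rx/Tx DMA channels
 * directly from the DW DMA controller found on the PCI bus.
 */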
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device of the DMA controller. On Intel Medfield this
	 * can only be the DW DMA controller with the device ID 0x0827.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Init the Rx channel first */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* Then init the Tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

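/*
 * Generic initialization: request the "rx" and "tx" DMA channels described
 * by the platform (e.g. via the device tree "dmas"/"dma-names" properties).
 */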
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	int ret;

	dws->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dws->rxchan)) {
		ret = PTR_ERR(dws->rxchan);
		dws->rxchan = NULL;
		goto err_exit;
	}

	dws->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dws->txchan)) {
		ret = PTR_ERR(dws->txchan);
		dws->txchan = NULL;
		goto free_rxchan;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return ret;
}

static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

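/*
 * Interrupt handler installed for the duration of a DMA transfer: let the
 * core check the controller status for FIFO errors and complete the DMA
 * completion so the waiter can bail out.
 */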
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	dw_spi_check_status(dws, false);

	complete(&dws->dma_completion);

	return IRQ_HANDLED;
}

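/* Use DMA only for transfers that don't fit entirely in the FIFO. */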
static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

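/*
 * Wait for the DMA transfer completion. The timeout is derived from the
 * transfer length and the effective bus speed, doubled and padded with an
 * extra 200 ms of slack.
 */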
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}

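/*
 * The DMA completion callback only means the data has been pushed into the
 * Tx FIFO. Wait here until the FIFO has actually been drained onto the bus,
 * delaying by the number of SCLK cycles the remaining entries take.
 */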
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts and the
 * per-channel completion callbacks below clear the corresponding bit. The
 * transfer-wide completion is only signalled once both channels are done.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * The DMA engine is unlikely to still be fetching data at this point,
	 * but if it is, give it a reasonable amount of time. The delay is
	 * derived from the number of entries left in the Rx FIFO and the
	 * reference clock rate, assuming roughly four clock periods per FIFO
	 * entry access.
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * The Rx counterpart of dw_spi_dma_tx_done(): clear the Rx busy flag and
 * signal the completion only if the Tx channel has already finished as well.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);

	return 0;
}

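/*
 * Configure the DMA channels for the transfer, enable the controller DMA
 * handshaking interface, and unmask the FIFO error interrupts so a broken
 * transfer gets detected.
 */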
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup the DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = DW_SPI_DMACR_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= DW_SPI_DMACR_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = DW_SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
	dw_spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

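/*
 * Normal DMA-based transfer: submit the whole Tx and (optionally) Rx SG
 * lists to the DMA engine at once and wait for the completion.
 */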
static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if requested */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/* Rx must be started before Tx so no inbound data is missed */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * If at least one of the requested DMA channels can't traverse an SG list in
 * hardware, the DMA driver will typically fall back to IRQ-based resubmission
 * of the list entries. With a non-deterministic IRQ-handler latency the Tx
 * channel may then get recharged and restarted before the Rx channel, so the
 * controller starts pushing data onto the bus while the Rx DMA channel isn't
 * listening yet, which eventually overflows the Rx FIFO.
 *
 * To avoid that, feed the DMA engine with the SG entries one by one. The Tx
 * and Rx SG lists may have a different number of entries of different lengths
 * (though the total lengths match), so the lists are virtually split into
 * chunks whose length is the minimum of the currently handled Tx and Rx
 * entries, and each chunk is submitted and awaited in turn.
 */
static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0, len = 0; base < xfer->len; base += len) {
		/* Fetch the next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch the next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit the DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit the DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/* Rx must be started before Tx so no inbound data is missed */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Only the DMA completion needs to be awaited here: the SPI
		 * controller stays enabled across the loop iterations, so no
		 * data left in the Tx/Rx FIFOs is lost between the chunks.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Submit the Rx and Tx SG lists to the DMA engine at once if the
	 * transfer is Tx-only, or if both channels can traverse the full SG
	 * lists in hardware (no max_sg_burst limit, or the limit is large
	 * enough). Otherwise fall back to feeding the SG entries one by one.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

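/*
 * Called when a transfer is aborted: terminate whichever DMA channels are
 * still marked busy.
 */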
static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);