Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 //
0003 // Copyright 2013 Freescale Semiconductor, Inc.
0004 // Copyright 2020 NXP
0005 //
0006 // Freescale DSPI driver
0007 // This file contains a driver for the Freescale DSPI
0008 
0009 #include <linux/clk.h>
0010 #include <linux/delay.h>
0011 #include <linux/dmaengine.h>
0012 #include <linux/dma-mapping.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/of_device.h>
0017 #include <linux/pinctrl/consumer.h>
0018 #include <linux/regmap.h>
0019 #include <linux/spi/spi.h>
0020 #include <linux/spi/spi-fsl-dspi.h>
0021 
#define DRIVER_NAME			"fsl-dspi"

/* Module Configuration Register */
#define SPI_MCR				0x00
#define SPI_MCR_MASTER			BIT(31)
#define SPI_MCR_PCSIS(x)		((x) << 16)
#define SPI_MCR_CLR_TXF			BIT(11)
#define SPI_MCR_CLR_RXF			BIT(10)
#define SPI_MCR_XSPI			BIT(3)
#define SPI_MCR_DIS_TXF			BIT(13)
#define SPI_MCR_DIS_RXF			BIT(12)
#define SPI_MCR_HALT			BIT(0)

/* Transfer Count Register (frame counter lives in the upper half-word) */
#define SPI_TCR				0x08
#define SPI_TCR_GET_TCNT(x)		(((x) & GENMASK(31, 16)) >> 16)

/* Clock and Transfer Attributes Registers 0..3 */
#define SPI_CTAR(x)			(0x0c + (((x) & GENMASK(1, 0)) * 4))
#define SPI_CTAR_FMSZ(x)		(((x) << 27) & GENMASK(30, 27))
#define SPI_CTAR_CPOL			BIT(26)
#define SPI_CTAR_CPHA			BIT(25)
#define SPI_CTAR_LSBFE			BIT(24)
#define SPI_CTAR_PCSSCK(x)		(((x) << 22) & GENMASK(23, 22))
#define SPI_CTAR_PASC(x)		(((x) << 20) & GENMASK(21, 20))
#define SPI_CTAR_PDT(x)			(((x) << 18) & GENMASK(19, 18))
#define SPI_CTAR_PBR(x)			(((x) << 16) & GENMASK(17, 16))
#define SPI_CTAR_CSSCK(x)		(((x) << 12) & GENMASK(15, 12))
#define SPI_CTAR_ASC(x)			(((x) << 8) & GENMASK(11, 8))
#define SPI_CTAR_DT(x)			(((x) << 4) & GENMASK(7, 4))
#define SPI_CTAR_BR(x)			((x) & GENMASK(3, 0))
#define SPI_CTAR_SCALE_BITS		0xf

#define SPI_CTAR0_SLAVE			0x0c

/* Status Register (flags presumably write-1-to-clear; see SPI_SR_CLEAR) */
#define SPI_SR				0x2c
#define SPI_SR_TCFQF			BIT(31)
#define SPI_SR_TFUF			BIT(27)
#define SPI_SR_TFFF			BIT(25)
#define SPI_SR_CMDTCF			BIT(23)
#define SPI_SR_SPEF			BIT(21)
#define SPI_SR_RFOF			BIT(19)
#define SPI_SR_TFIWF			BIT(18)
#define SPI_SR_RFDF			BIT(17)
#define SPI_SR_CMDFFF			BIT(16)
#define SPI_SR_CLEAR			(SPI_SR_TCFQF | \
					SPI_SR_TFUF | SPI_SR_TFFF | \
					SPI_SR_CMDTCF | SPI_SR_SPEF | \
					SPI_SR_RFOF | SPI_SR_TFIWF | \
					SPI_SR_RFDF | SPI_SR_CMDFFF)

/* DMA/Interrupt Request Select and Enable Register */
#define SPI_RSER_TFFFE			BIT(25)
#define SPI_RSER_TFFFD			BIT(24)
#define SPI_RSER_RFDFE			BIT(17)
#define SPI_RSER_RFDFD			BIT(16)

#define SPI_RSER			0x30
#define SPI_RSER_TCFQE			BIT(31)
#define SPI_RSER_CMDTCFE		BIT(23)

/* PUSH TX FIFO Register: 16-bit command half + 16-bit data half */
#define SPI_PUSHR			0x34
#define SPI_PUSHR_CMD_CONT		BIT(15)
/* Fix: parenthesize the shift before masking (macro hygiene; previously
 * relied on << binding tighter than &) */
#define SPI_PUSHR_CMD_CTAS(x)		(((x) << 12) & GENMASK(14, 12))
#define SPI_PUSHR_CMD_EOQ		BIT(11)
#define SPI_PUSHR_CMD_CTCNT		BIT(10)
#define SPI_PUSHR_CMD_PCS(x)		(BIT(x) & GENMASK(5, 0))

#define SPI_PUSHR_SLAVE			0x34

/* POP RX FIFO Register */
#define SPI_POPR			0x38

/* TX and RX FIFO word registers (not referenced in this file chunk) */
#define SPI_TXFR0			0x3c
#define SPI_TXFR1			0x40
#define SPI_TXFR2			0x44
#define SPI_TXFR3			0x48
#define SPI_RXFR0			0x7c
#define SPI_RXFR1			0x80
#define SPI_RXFR2			0x84
#define SPI_RXFR3			0x88

/* Clock and Transfer Attributes Registers Extended 0..3 (XSPI mode) */
#define SPI_CTARE(x)			(0x11c + (((x) & GENMASK(1, 0)) * 4))
#define SPI_CTARE_FMSZE(x)		(((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x)		((x) & 0x7ff)

#define SPI_SREX			0x13c

/* The hardware frame-size fields take "number of bits minus one" */
#define SPI_FRAME_BITS(bits)		SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_EBITS(bits)		SPI_CTARE_FMSZE(((bits) - 1) >> 4)

#define DMA_COMPLETION_TIMEOUT		msecs_to_jiffies(3000)
0109 
/* Per-SPI-device state: the precomputed CTAR register value */
struct chip_data {
	u32			ctar_val;
};

/* How transfer words are moved between memory and the FIFOs */
enum dspi_trans_mode {
	DSPI_XSPI_MODE,		/* CPU writes CMD/TX FIFOs through regmap */
	DSPI_DMA_MODE,		/* DMA channels feed PUSHR / drain POPR */
};

/* Per-SoC capabilities (see devtype_data[] below) */
struct fsl_dspi_devtype_data {
	enum dspi_trans_mode	trans_mode;
	/* not used in this chunk; presumably bounds SCK relative to the
	 * input clock - TODO confirm against the rest of the file */
	u8			max_clock_factor;
	int			fifo_size;	/* FIFO depth, in entries */
};
0124 
/* Indices into devtype_data[] - one per supported SoC family */
enum {
	LS1021A,
	LS1012A,
	LS1028A,
	LS1043A,
	LS1046A,
	LS2080A,
	LS2085A,
	LX2160A,
	MCF5441X,
	VF610,
};
0137 
/*
 * Per-SoC parameters. Note that every SoC carrying the A-011218 DMA
 * erratum is configured for XSPI (CPU-driven) mode instead of DMA.
 */
static const struct fsl_dspi_devtype_data devtype_data[] = {
	[VF610] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 2,
		.fifo_size		= 4,
	},
	[LS1021A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS1012A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS1028A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS1043A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS1046A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS2080A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS2085A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LX2160A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[MCF5441X] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
};
0194 
/* DMA channels, bounce buffers and completions for DSPI_DMA_MODE */
struct fsl_dspi_dma {
	u32					*tx_dma_buf;	/* coherent TX bounce buffer */
	struct dma_chan				*chan_tx;
	dma_addr_t				tx_dma_phys;	/* bus address of tx_dma_buf */
	struct completion			cmd_tx_complete;
	struct dma_async_tx_descriptor		*tx_desc;

	u32					*rx_dma_buf;	/* coherent RX bounce buffer */
	struct dma_chan				*chan_rx;
	dma_addr_t				rx_dma_phys;	/* bus address of rx_dma_buf */
	struct completion			cmd_rx_complete;
	struct dma_async_tx_descriptor		*rx_desc;
};

/* Driver-private state, one per controller instance */
struct fsl_dspi {
	struct spi_controller			*ctlr;
	struct platform_device			*pdev;

	struct regmap				*regmap;
	struct regmap				*regmap_pushr;	/* half-word view of PUSHR (XSPI) */
	int					irq;		/* 0 means polling mode */
	struct clk				*clk;

	/* State of the spi_transfer currently being executed */
	struct spi_transfer			*cur_transfer;
	struct spi_message			*cur_msg;
	struct chip_data			*cur_chip;
	size_t					progress;	/* words done, in xfer bits_per_word units */
	size_t					len;		/* bytes left in the current transfer */
	const void				*tx;		/* TX cursor; NULL for rx-only transfers */
	void					*rx;		/* RX cursor; NULL for tx-only transfers */
	u16					tx_cmd;		/* PUSHR command half for this transfer */
	const struct fsl_dspi_devtype_data	*devtype_data;

	struct completion			xfer_done;	/* signalled by dspi_interrupt() */

	struct fsl_dspi_dma			*dma;		/* NULL unless DMA mode was set up */

	/* Effective word size chosen by dspi_setup_accel() */
	int					oper_word_size;
	int					oper_bits_per_word;

	/* Words currently posted to the FIFO / DMA bounce buffer */
	int					words_in_flight;

	/*
	 * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
	 * individually (in XSPI mode)
	 */
	int					pushr_cmd;
	int					pushr_tx;

	/* Memory<->FIFO conversion helpers, set by dspi_setup_accel() */
	void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
	void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};
0247 
0248 static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
0249 {
0250     switch (dspi->oper_word_size) {
0251     case 1:
0252         *txdata = *(u8 *)dspi->tx;
0253         break;
0254     case 2:
0255         *txdata = *(u16 *)dspi->tx;
0256         break;
0257     case 4:
0258         *txdata = *(u32 *)dspi->tx;
0259         break;
0260     }
0261     dspi->tx += dspi->oper_word_size;
0262 }
0263 
0264 static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
0265 {
0266     switch (dspi->oper_word_size) {
0267     case 1:
0268         *(u8 *)dspi->rx = rxdata;
0269         break;
0270     case 2:
0271         *(u16 *)dspi->rx = rxdata;
0272         break;
0273     case 4:
0274         *(u32 *)dspi->rx = rxdata;
0275         break;
0276     }
0277     dspi->rx += dspi->oper_word_size;
0278 }
0279 
0280 static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
0281 {
0282     *txdata = cpu_to_be32(*(u32 *)dspi->tx);
0283     dspi->tx += sizeof(u32);
0284 }
0285 
0286 static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
0287 {
0288     *(u32 *)dspi->rx = be32_to_cpu(rxdata);
0289     dspi->rx += sizeof(u32);
0290 }
0291 
0292 static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
0293 {
0294     *txdata = cpu_to_be16(*(u16 *)dspi->tx);
0295     dspi->tx += sizeof(u16);
0296 }
0297 
0298 static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
0299 {
0300     *(u16 *)dspi->rx = be16_to_cpu(rxdata);
0301     dspi->rx += sizeof(u16);
0302 }
0303 
0304 static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
0305 {
0306     u16 hi = *(u16 *)dspi->tx;
0307     u16 lo = *(u16 *)(dspi->tx + 2);
0308 
0309     *txdata = (u32)hi << 16 | lo;
0310     dspi->tx += sizeof(u32);
0311 }
0312 
0313 static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
0314 {
0315     u16 hi = rxdata & 0xffff;
0316     u16 lo = rxdata >> 16;
0317 
0318     *(u16 *)dspi->rx = lo;
0319     *(u16 *)(dspi->rx + 2) = hi;
0320     dspi->rx += sizeof(u32);
0321 }
0322 
/*
 * Pop one word from the TX buffer for pushing into the
 * PUSHR register (TX FIFO).
 * Returns 0 for rx-only transfers (dspi->tx is NULL), but still
 * decrements dspi->len, which tracks overall transfer progress.
 */
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u32 txdata = 0;

	if (dspi->tx)
		dspi->host_to_dev(dspi, &txdata);
	dspi->len -= dspi->oper_word_size;
	return txdata;
}
0336 
0337 /* Prepare one TX FIFO entry (txdata plus cmd) */
0338 static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
0339 {
0340     u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
0341 
0342     if (spi_controller_is_slave(dspi->ctlr))
0343         return data;
0344 
0345     if (dspi->len > 0)
0346         cmd |= SPI_PUSHR_CMD_CONT;
0347     return cmd << 16 | data;
0348 }
0349 
0350 /* Push one word to the RX buffer from the POPR register (RX FIFO) */
0351 static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
0352 {
0353     if (!dspi->rx)
0354         return;
0355     dspi->dev_to_host(dspi, rxdata);
0356 }
0357 
0358 static void dspi_tx_dma_callback(void *arg)
0359 {
0360     struct fsl_dspi *dspi = arg;
0361     struct fsl_dspi_dma *dma = dspi->dma;
0362 
0363     complete(&dma->cmd_tx_complete);
0364 }
0365 
0366 static void dspi_rx_dma_callback(void *arg)
0367 {
0368     struct fsl_dspi *dspi = arg;
0369     struct fsl_dspi_dma *dma = dspi->dma;
0370     int i;
0371 
0372     if (dspi->rx) {
0373         for (i = 0; i < dspi->words_in_flight; i++)
0374             dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
0375     }
0376 
0377     complete(&dma->cmd_rx_complete);
0378 }
0379 
/*
 * Fill the TX bounce buffer with up to words_in_flight PUSHR entries,
 * submit one TX and one RX DMA descriptor, and (in master mode) wait
 * for both directions to complete.
 *
 * Returns 0 on success, -EIO/-EINVAL on descriptor preparation or
 * submission failure, -ETIMEDOUT if either direction does not finish
 * within DMA_COMPLETION_TIMEOUT.
 */
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct device *dev = &dspi->pdev->dev;
	struct fsl_dspi_dma *dma = dspi->dma;
	int time_left;
	int i;

	/* Each entry carries the 16-bit command plus the TX data word */
	for (i = 0; i < dspi->words_in_flight; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	/* Reset both completions before issuing, so a stale complete
	 * from a previous chunk cannot satisfy the waits below. */
	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	if (spi_controller_is_slave(dspi->ctlr)) {
		/* No timeout in slave mode: the master dictates the pace */
		wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
		return 0;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}
0457 
0458 static void dspi_setup_accel(struct fsl_dspi *dspi);
0459 
/*
 * DMA-mode transfer loop: execute the current spi_transfer one
 * FIFO-sized chunk at a time until the whole buffer has been sent.
 * Returns 0 on success or the first chunk's error code.
 */
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct spi_message *message = dspi->cur_msg;
	struct device *dev = &dspi->pdev->dev;
	int ret = 0;

	/*
	 * dspi->len gets decremented by dspi_pop_tx_pushr in
	 * dspi_next_xfer_dma_submit
	 */
	while (dspi->len) {
		/* Figure out operational bits-per-word for this chunk */
		dspi_setup_accel(dspi);

		/* Cap the chunk at one FIFO's worth of words */
		dspi->words_in_flight = dspi->len / dspi->oper_word_size;
		if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
			dspi->words_in_flight = dspi->devtype_data->fifo_size;

		message->actual_length += dspi->words_in_flight *
					  dspi->oper_word_size;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			break;
		}
	}

	return ret;
}
0490 
/*
 * Acquire the "rx"/"tx" DMA channels, allocate a coherent bounce buffer
 * for each, and configure both channels to target the POPR/PUSHR
 * registers at @phy_addr.
 *
 * On success dspi->dma is set; on failure everything acquired so far is
 * unwound in reverse order and dspi->dma stays NULL.
 */
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	/*
	 * NOTE(review): the buffers are fifo_size * 2 bytes, while the
	 * descriptors in dspi_next_xfer_dma_submit() can span up to
	 * fifo_size * DMA_SLAVE_BUSWIDTH_4_BYTES bytes - confirm sizing.
	 */
	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
	struct device *dev = &dspi->pdev->dev;
	struct dma_slave_config cfg;
	struct fsl_dspi_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		dev_err(dev, "rx dma channel not available\n");
		ret = PTR_ERR(dma->chan_rx);
		return ret;
	}

	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		dev_err(dev, "tx dma channel not available\n");
		ret = PTR_ERR(dma->chan_tx);
		goto err_tx_channel;
	}

	/* Coherent buffers: CPU fills/drains them, the device DMAs them */
	dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
					     dma_bufsize, &dma->tx_dma_phys,
					     GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
					     dma_bufsize, &dma->rx_dma_phys,
					     GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	/* Both directions use 32-bit accesses on the FIFO registers */
	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure rx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure tx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

	/* Unwind in the exact reverse order of acquisition */
err_slave_config:
	dma_free_coherent(dma->chan_rx->device->dev,
			  dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dma->chan_tx->device->dev,
			  dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}
0579 
0580 static void dspi_release_dma(struct fsl_dspi *dspi)
0581 {
0582     int dma_bufsize = dspi->devtype_data->fifo_size * 2;
0583     struct fsl_dspi_dma *dma = dspi->dma;
0584 
0585     if (!dma)
0586         return;
0587 
0588     if (dma->chan_tx) {
0589         dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
0590                   dma->tx_dma_buf, dma->tx_dma_phys);
0591         dma_release_channel(dma->chan_tx);
0592     }
0593 
0594     if (dma->chan_rx) {
0595         dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
0596                   dma->rx_dma_buf, dma->rx_dma_phys);
0597         dma_release_channel(dma->chan_rx);
0598     }
0599 }
0600 
/*
 * Translate a requested SCK rate into the CTAR PBR (prescaler) and BR
 * (scaler) field values.
 *
 * @pbr:      out, index into the {2, 3, 5, 7} prescaler table
 * @br:       out, index into the scaler table
 * @speed_hz: requested bus clock in Hz (expected non-zero)
 * @clkrate:  DSPI input clock rate in Hz
 *
 * Picks the divider pair with the smallest product that is still >=
 * clkrate / speed_hz (rounded up), so the resulting clock never
 * exceeds the request. Falls back to the maximum dividers (slowest
 * clock) when even the largest product is insufficient.
 */
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
			   unsigned long clkrate)
{
	/* Valid baud rate pre-scaler and scaler values: fixed hardware
	 * tables, so build them once instead of on the stack per call. */
	static const int pbr_tbl[4] = {2, 3, 5, 7};
	static const int brs[16] = { 2,	4,	6,	8,
			16,	32,	64,	128,
			256,	512,	1024,	2048,
			4096,	8192,	16384,	32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	/* Round the required divider up so we never overclock the bus */
	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			scale = brs[i] * pbr_tbl[j];
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				/* pbr_tbl is ascending: no smaller product
				 * exists for this brs[i], move on. */
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br =  ARRAY_SIZE(brs) - 1;
	}
}
0637 
0638 static void ns_delay_scale(char *psc, char *sc, int delay_ns,
0639                unsigned long clkrate)
0640 {
0641     int scale_needed, scale, minscale = INT_MAX;
0642     int pscale_tbl[4] = {1, 3, 5, 7};
0643     u32 remainder;
0644     int i, j;
0645 
0646     scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
0647                    &remainder);
0648     if (remainder)
0649         scale_needed++;
0650 
0651     for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
0652         for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
0653             scale = pscale_tbl[i] * (2 << j);
0654             if (scale >= scale_needed) {
0655                 if (scale < minscale) {
0656                     minscale = scale;
0657                     *psc = i;
0658                     *sc = j;
0659                 }
0660                 break;
0661             }
0662         }
0663 
0664     if (minscale == INT_MAX) {
0665         pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
0666             delay_ns, clkrate);
0667         *psc = ARRAY_SIZE(pscale_tbl) - 1;
0668         *sc = SPI_CTAR_SCALE_BITS;
0669     }
0670 }
0671 
/* Write the 16-bit command half of a PUSHR entry (XSPI split access) */
static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
	/*
	 * The only time when the PCS doesn't need continuation after this word
	 * is when it's last. We need to look ahead, because we actually call
	 * dspi_pop_tx (the function that decrements dspi->len) _after_
	 * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
	 * word is enough. If there's more to transmit than that,
	 * dspi_xspi_write will know to split the FIFO writes in 2, and
	 * generate a new PUSHR command with the final word that will have PCS
	 * deasserted (not continued) here.
	 */
	if (dspi->len > dspi->oper_word_size)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}
0688 
/* Write the 16-bit data half of a PUSHR entry (XSPI split access) */
static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}
0693 
/*
 * Push @num_words transfer words into the hardware (XSPI mode):
 * program the CTARE preload count (DTCP), write one CMD FIFO entry,
 * then the TX FIFO entries - two per word when words are 32 bit,
 * since each TX FIFO entry is written as a 16-bit half.
 */
static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
	int num_bytes = num_words * dspi->oper_word_size;
	u16 tx_cmd = dspi->tx_cmd;

	/*
	 * tx_cmd has CONT cleared only when the PCS must de-assert at the
	 * end of the buffer. When this chunk is also the final one
	 * (num_bytes == dspi->len), tag the command End-Of-Queue -
	 * presumably signalling the controller that the frame sequence
	 * stops here (see SPI_PUSHR_CMD_EOQ; confirm against the RM).
	 */
	if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
		tx_cmd |= SPI_PUSHR_CMD_EOQ;

	/* Update CTARE: extended frame size bit plus the preload count */
	regmap_write(dspi->regmap, SPI_CTARE(0),
		     SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
		     SPI_CTARE_DTCP(num_words));

	/*
	 * Write the CMD FIFO entry first, and then the two
	 * corresponding TX FIFO entries (or one...).
	 */
	dspi_pushr_cmd_write(dspi, tx_cmd);

	/* Fill TX FIFO with as many transfers as possible */
	while (num_words--) {
		u32 data = dspi_pop_tx(dspi);

		dspi_pushr_txdata_write(dspi, data & 0xFFFF);
		if (dspi->oper_bits_per_word > 16)
			dspi_pushr_txdata_write(dspi, data >> 16);
	}
}
0730 
0731 static u32 dspi_popr_read(struct fsl_dspi *dspi)
0732 {
0733     u32 rxdata = 0;
0734 
0735     regmap_read(dspi->regmap, SPI_POPR, &rxdata);
0736     return rxdata;
0737 }
0738 
0739 static void dspi_fifo_read(struct fsl_dspi *dspi)
0740 {
0741     int num_fifo_entries = dspi->words_in_flight;
0742 
0743     /* Read one FIFO entry and push to rx buffer */
0744     while (num_fifo_entries--)
0745         dspi_push_rx(dspi, dspi_popr_read(dspi));
0746 }
0747 
/*
 * Choose the "operational" bits-per-word for the remainder of the
 * current buffer - i.e. how many SPI words get packed into each FIFO
 * entry - and install the matching host<->device conversion helpers.
 * Packing 8/16-bit SPI words into wider FIFO words reduces the number
 * of FIFO accesses ("acceleration").
 */
static void dspi_setup_accel(struct fsl_dspi *dspi)
{
	struct spi_transfer *xfer = dspi->cur_transfer;
	bool odd = !!(dspi->len & 1);

	/* No accel for frames not multiple of 8 bits at the moment */
	if (xfer->bits_per_word % 8)
		goto no_accel;

	/*
	 * Tail handling: when what's left fits into the FIFO in a single
	 * go, drop to a smaller word size so no bytes are left behind.
	 */
	if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
		dspi->oper_bits_per_word = 16;
	} else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
		dspi->oper_bits_per_word = 8;
	} else {
		/* Start off with maximum supported by hardware */
		if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
			dspi->oper_bits_per_word = 32;
		else
			dspi->oper_bits_per_word = 16;

		/*
		 * And go down only if the buffer can't be sent with
		 * words this big
		 */
		do {
			if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
				break;

			dspi->oper_bits_per_word /= 2;
		} while (dspi->oper_bits_per_word > 8);
	}

	if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_8on32_dev_to_host;
		dspi->host_to_dev = dspi_8on32_host_to_dev;
	} else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
		dspi->dev_to_host = dspi_8on16_dev_to_host;
		dspi->host_to_dev = dspi_8on16_host_to_dev;
	} else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_16on32_dev_to_host;
		dspi->host_to_dev = dspi_16on32_host_to_dev;
	} else {
		/* Note: the label below is also entered via the goto at the
		 * top when acceleration is disabled entirely. */
no_accel:
		dspi->dev_to_host = dspi_native_dev_to_host;
		dspi->host_to_dev = dspi_native_host_to_dev;
		dspi->oper_bits_per_word = xfer->bits_per_word;
	}

	dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);

	/*
	 * Update CTAR here (code is common for XSPI and DMA modes).
	 * We will update CTARE in the portion specific to XSPI, when we
	 * also know the preload value (DTCP).
	 */
	regmap_write(dspi->regmap, SPI_CTAR(0),
		     dspi->cur_chip->ctar_val |
		     SPI_FRAME_BITS(dspi->oper_bits_per_word));
}
0807 
/*
 * Program one FIFO's worth of the current transfer into the hardware:
 * pick the operational word size, account for progress, then fill the
 * CMD/TX FIFOs. Everything after the FIFO write potentially races with
 * the completion interrupt.
 */
static void dspi_fifo_write(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->devtype_data->fifo_size;
	struct spi_transfer *xfer = dspi->cur_transfer;
	struct spi_message *msg = dspi->cur_msg;
	int num_words, num_bytes;

	dspi_setup_accel(dspi);

	/* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
	if (dspi->oper_word_size == 4)
		num_fifo_entries /= 2;

	/*
	 * Integer division intentionally trims off odd (or non-multiple of 4)
	 * numbers of bytes at the end of the buffer, which will be sent next
	 * time using a smaller oper_word_size.
	 */
	num_words = dspi->len / dspi->oper_word_size;
	if (num_words > num_fifo_entries)
		num_words = num_fifo_entries;

	/* Update total number of bytes that were transferred */
	num_bytes = num_words * dspi->oper_word_size;
	msg->actual_length += num_bytes;
	/* progress counts words of the transfer's nominal bits_per_word */
	dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);

	/*
	 * Update shared variable for use in the next interrupt (both in
	 * dspi_fifo_read and in dspi_fifo_write).
	 */
	dspi->words_in_flight = num_words;

	spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);

	dspi_xspi_fifo_write(dspi, num_words);
	/*
	 * Everything after this point is in a potential race with the next
	 * interrupt, so we must never use dspi->words_in_flight again since it
	 * might already be modified by the next dspi_fifo_write.
	 */

	spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
				dspi->progress, !dspi->irq);
}
0853 
0854 static int dspi_rxtx(struct fsl_dspi *dspi)
0855 {
0856     dspi_fifo_read(dspi);
0857 
0858     if (!dspi->len)
0859         /* Success! */
0860         return 0;
0861 
0862     dspi_fifo_write(dspi);
0863 
0864     return -EINPROGRESS;
0865 }
0866 
0867 static int dspi_poll(struct fsl_dspi *dspi)
0868 {
0869     int tries = 1000;
0870     u32 spi_sr;
0871 
0872     do {
0873         regmap_read(dspi->regmap, SPI_SR, &spi_sr);
0874         regmap_write(dspi->regmap, SPI_SR, spi_sr);
0875 
0876         if (spi_sr & SPI_SR_CMDTCF)
0877             break;
0878     } while (--tries);
0879 
0880     if (!tries)
0881         return -ETIMEDOUT;
0882 
0883     return dspi_rxtx(dspi);
0884 }
0885 
0886 static irqreturn_t dspi_interrupt(int irq, void *dev_id)
0887 {
0888     struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
0889     u32 spi_sr;
0890 
0891     regmap_read(dspi->regmap, SPI_SR, &spi_sr);
0892     regmap_write(dspi->regmap, SPI_SR, spi_sr);
0893 
0894     if (!(spi_sr & SPI_SR_CMDTCF))
0895         return IRQ_NONE;
0896 
0897     if (dspi_rxtx(dspi) == 0)
0898         complete(&dspi->xfer_done);
0899 
0900     return IRQ_HANDLED;
0901 }
0902 
/*
 * spi_controller->transfer_one_message() implementation: execute every
 * spi_transfer in @message synchronously, either via DMA or via the
 * CPU-driven XSPI path (interrupt- or poll-based depending on whether
 * an IRQ was obtained). Returns 0 or the first failing transfer's
 * error code; the message status is finalized either way.
 */
static int dspi_transfer_one_message(struct spi_controller *ctlr,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	int status = 0;

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);
		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
			       SPI_PUSHR_CMD_PCS(spi->chip_select);
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->len = transfer->len;
		dspi->progress = 0;

		/* Start each transfer with clean FIFOs */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

		spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
				       dspi->progress, !dspi->irq);

		if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
			status = dspi_dma_xfer(dspi);
		} else {
			dspi_fifo_write(dspi);

			if (dspi->irq) {
				wait_for_completion(&dspi->xfer_done);
				reinit_completion(&dspi->xfer_done);
			} else {
				/* Poll until dspi_rxtx() reports done */
				do {
					status = dspi_poll(dspi);
				} while (status == -EINPROGRESS);
			}
		}
		if (status)
			break;

		spi_transfer_delay_exec(transfer);
	}

	message->status = status;
	spi_finalize_current_message(ctlr);

	return status;
}
0974 
/*
 * ->setup() callback: compute and cache the CTAR (Clock and Transfer
 * Attributes) value for this spi_device from its SPI mode, maximum
 * speed and the optional CS<->SCK delay properties.
 *
 * Allocates the per-device chip_data on the first call; it is freed in
 * dspi_cleanup(). Returns 0 on success or -ENOMEM.
 */
static int dspi_setup(struct spi_device *spi)
{
    struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
    unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
    u32 cs_sck_delay = 0, sck_cs_delay = 0;
    struct fsl_dspi_platform_data *pdata;
    unsigned char pasc = 0, asc = 0;
    struct chip_data *chip;
    unsigned long clkrate;

    /* Only alloc on first setup */
    chip = spi_get_ctldata(spi);
    if (chip == NULL) {
        chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
        if (!chip)
            return -ENOMEM;
    }

    /* Delays come from platform data (Coldfire) or device tree */
    pdata = dev_get_platdata(&dspi->pdev->dev);

    if (!pdata) {
        of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
                     &cs_sck_delay);

        of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
                     &sck_cs_delay);
    } else {
        cs_sck_delay = pdata->cs_sck_delay;
        sck_cs_delay = pdata->sck_cs_delay;
    }

    /* Translate max_speed_hz into prescaler/scaler baud fields */
    clkrate = clk_get_rate(dspi->clk);
    hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

    /* Set PCS to SCK delay scale values */
    ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

    /* Set After SCK delay scale values */
    ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

    chip->ctar_val = 0;
    if (spi->mode & SPI_CPOL)
        chip->ctar_val |= SPI_CTAR_CPOL;
    if (spi->mode & SPI_CPHA)
        chip->ctar_val |= SPI_CTAR_CPHA;

    /* Timing/baud fields only apply when we drive the clock (master) */
    if (!spi_controller_is_slave(dspi->ctlr)) {
        chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
                  SPI_CTAR_CSSCK(cssck) |
                  SPI_CTAR_PASC(pasc) |
                  SPI_CTAR_ASC(asc) |
                  SPI_CTAR_PBR(pbr) |
                  SPI_CTAR_BR(br);

        if (spi->mode & SPI_LSB_FIRST)
            chip->ctar_val |= SPI_CTAR_LSBFE;
    }

    spi_set_ctldata(spi, chip);

    return 0;
}
1037 
1038 static void dspi_cleanup(struct spi_device *spi)
1039 {
1040     struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
1041 
1042     dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
1043         spi->controller->bus_num, spi->chip_select);
1044 
1045     kfree(chip);
1046 }
1047 
/*
 * Device tree match table: each compatible string maps to the
 * devtype_data entry describing that SoC's DSPI instance.
 */
static const struct of_device_id fsl_dspi_dt_ids[] = {
    {
        .compatible = "fsl,vf610-dspi",
        .data = &devtype_data[VF610],
    }, {
        .compatible = "fsl,ls1021a-v1.0-dspi",
        .data = &devtype_data[LS1021A],
    }, {
        .compatible = "fsl,ls1012a-dspi",
        .data = &devtype_data[LS1012A],
    }, {
        .compatible = "fsl,ls1028a-dspi",
        .data = &devtype_data[LS1028A],
    }, {
        .compatible = "fsl,ls1043a-dspi",
        .data = &devtype_data[LS1043A],
    }, {
        .compatible = "fsl,ls1046a-dspi",
        .data = &devtype_data[LS1046A],
    }, {
        .compatible = "fsl,ls2080a-dspi",
        .data = &devtype_data[LS2080A],
    }, {
        .compatible = "fsl,ls2085a-dspi",
        .data = &devtype_data[LS2085A],
    }, {
        .compatible = "fsl,lx2160a-dspi",
        .data = &devtype_data[LX2160A],
    },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
1080 
1081 #ifdef CONFIG_PM_SLEEP
1082 static int dspi_suspend(struct device *dev)
1083 {
1084     struct fsl_dspi *dspi = dev_get_drvdata(dev);
1085 
1086     if (dspi->irq)
1087         disable_irq(dspi->irq);
1088     spi_controller_suspend(dspi->ctlr);
1089     clk_disable_unprepare(dspi->clk);
1090 
1091     pinctrl_pm_select_sleep_state(dev);
1092 
1093     return 0;
1094 }
1095 
/*
 * System resume: restore pinctrl, re-enable the clock, resume the SPI
 * core, and re-arm the IRQ — the mirror image of dspi_suspend().
 */
static int dspi_resume(struct device *dev)
{
    struct fsl_dspi *dspi = dev_get_drvdata(dev);
    int ret;

    pinctrl_pm_select_default_state(dev);

    ret = clk_prepare_enable(dspi->clk);
    if (ret)
        return ret;
    spi_controller_resume(dspi->ctlr);
    if (dspi->irq)
        enable_irq(dspi->irq);

    return 0;
}
1112 #endif /* CONFIG_PM_SLEEP */
1113 
/* PM ops; suspend/resume are only wired up under CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
1115 
/*
 * Registers that must bypass the regmap cache: control/status and the
 * TX/RX FIFO window, whose contents change underneath software.
 */
static const struct regmap_range dspi_volatile_ranges[] = {
    regmap_reg_range(SPI_MCR, SPI_TCR),
    regmap_reg_range(SPI_SR, SPI_SR),
    regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
    .yes_ranges = dspi_volatile_ranges,
    .n_yes_ranges   = ARRAY_SIZE(dspi_volatile_ranges),
};
1126 
/* Regmap layout for non-XSPI controllers: 32-bit registers up to 0x88. */
static const struct regmap_config dspi_regmap_config = {
    .reg_bits   = 32,
    .val_bits   = 32,
    .reg_stride = 4,
    .max_register   = 0x88,
    .volatile_table = &dspi_volatile_table,
};
1134 
/*
 * Volatile ranges for XSPI-capable controllers: same as the plain DSPI
 * set plus the extended status register SREX.
 */
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
    regmap_reg_range(SPI_MCR, SPI_TCR),
    regmap_reg_range(SPI_SR, SPI_SR),
    regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
    regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
    .yes_ranges = dspi_xspi_volatile_ranges,
    .n_yes_ranges   = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};
1146 
/*
 * XSPI regmap pair: [0] covers the full 32-bit register file, while
 * [1] is a secondary 16-bit map over PUSHR so the command and data
 * halves of that register can be written independently.
 */
static const struct regmap_config dspi_xspi_regmap_config[] = {
    {
        .reg_bits   = 32,
        .val_bits   = 32,
        .reg_stride = 4,
        .max_register   = 0x13c,
        .volatile_table = &dspi_xspi_volatile_table,
    },
    {
        .name       = "pushr",
        .reg_bits   = 16,
        .val_bits   = 16,
        .reg_stride = 2,
        .max_register   = 0x2,
    },
};
1163 
/*
 * One-time controller initialization: program the MCR and unmask the
 * interrupt/DMA request sources matching the devtype's transfer mode.
 * Returns 0 on success or -EINVAL for an unknown trans_mode.
 */
static int dspi_init(struct fsl_dspi *dspi)
{
    unsigned int mcr;

    /* Set idle states for all chip select signals to high */
    mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));

    if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
        mcr |= SPI_MCR_XSPI;
    if (!spi_controller_is_slave(dspi->ctlr))
        mcr |= SPI_MCR_MASTER;

    regmap_write(dspi->regmap, SPI_MCR, mcr);
    /* Clear any status flags left over from before the driver loaded */
    regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

    switch (dspi->devtype_data->trans_mode) {
    case DSPI_XSPI_MODE:
        /* Signal command-transfer completion via CMDTCF */
        regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
        break;
    case DSPI_DMA_MODE:
        /* Route TX-fill and RX-drain FIFO requests to the DMA engine */
        regmap_write(dspi->regmap, SPI_RSER,
                 SPI_RSER_TFFFE | SPI_RSER_TFFFD |
                 SPI_RSER_RFDFE | SPI_RSER_RFDFD);
        break;
    default:
        dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
            dspi->devtype_data->trans_mode);
        return -EINVAL;
    }

    return 0;
}
1196 
/*
 * ->slave_abort() callback: cancel a transfer the remote master never
 * completed while this controller operates in slave mode.
 */
static int dspi_slave_abort(struct spi_master *master)
{
    struct fsl_dspi *dspi = spi_master_get_devdata(master);

    /*
     * Terminate all pending DMA transactions for the SPI working
     * in SLAVE mode.
     */
    if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
        dmaengine_terminate_sync(dspi->dma->chan_rx);
        dmaengine_terminate_sync(dspi->dma->chan_tx);
    }

    /* Clear the internal DSPI RX and TX FIFO buffers */
    regmap_update_bits(dspi->regmap, SPI_MCR,
               SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
               SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

    return 0;
}
1217 
/*
 * Probe: allocate the controller, parse platform data or DT, map the
 * register space, set up regmap(s), clock, IRQ (falling back to poll
 * mode) and optional DMA, then register with the SPI core.
 * Resources acquired with devm_* are released automatically; the rest
 * unwind through the goto labels at the bottom.
 */
static int dspi_probe(struct platform_device *pdev)
{
    struct device_node *np = pdev->dev.of_node;
    const struct regmap_config *regmap_config;
    struct fsl_dspi_platform_data *pdata;
    struct spi_controller *ctlr;
    int ret, cs_num, bus_num = -1;
    struct fsl_dspi *dspi;
    struct resource *res;
    void __iomem *base;
    bool big_endian;

    dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL);
    if (!dspi)
        return -ENOMEM;

    ctlr = spi_alloc_master(&pdev->dev, 0);
    if (!ctlr)
        return -ENOMEM;

    spi_controller_set_devdata(ctlr, dspi);
    platform_set_drvdata(pdev, dspi);

    dspi->pdev = pdev;
    dspi->ctlr = ctlr;

    ctlr->setup = dspi_setup;
    ctlr->transfer_one_message = dspi_transfer_one_message;
    ctlr->dev.of_node = pdev->dev.of_node;

    ctlr->cleanup = dspi_cleanup;
    ctlr->slave_abort = dspi_slave_abort;
    ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

    pdata = dev_get_platdata(&pdev->dev);
    if (pdata) {
        ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num;
        ctlr->bus_num = pdata->bus_num;

        /* Only Coldfire uses platform data */
        dspi->devtype_data = &devtype_data[MCF5441X];
        big_endian = true;
    } else {
        /* Device-tree path: everything comes from properties */
        ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
        if (ret < 0) {
            dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
            goto out_ctlr_put;
        }
        ctlr->num_chipselect = ctlr->max_native_cs = cs_num;

        /* bus_num stays -1 (dynamic) if the property is absent */
        of_property_read_u32(np, "bus-num", &bus_num);
        ctlr->bus_num = bus_num;

        if (of_property_read_bool(np, "spi-slave"))
            ctlr->slave = true;

        dspi->devtype_data = of_device_get_match_data(&pdev->dev);
        if (!dspi->devtype_data) {
            dev_err(&pdev->dev, "can't get devtype_data\n");
            ret = -EFAULT;
            goto out_ctlr_put;
        }

        big_endian = of_device_is_big_endian(np);
    }
    /* Byte offsets of the command/data halves within the PUSHR word */
    if (big_endian) {
        dspi->pushr_cmd = 0;
        dspi->pushr_tx = 2;
    } else {
        dspi->pushr_cmd = 2;
        dspi->pushr_tx = 0;
    }

    /* XSPI hardware extends the supported word size from 16 to 32 bits */
    if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
        ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
    else
        ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base)) {
        ret = PTR_ERR(base);
        goto out_ctlr_put;
    }

    if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
        regmap_config = &dspi_xspi_regmap_config[0];
    else
        regmap_config = &dspi_regmap_config;
    dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
    if (IS_ERR(dspi->regmap)) {
        dev_err(&pdev->dev, "failed to init regmap: %ld\n",
                PTR_ERR(dspi->regmap));
        ret = PTR_ERR(dspi->regmap);
        goto out_ctlr_put;
    }

    /* Secondary 16-bit regmap over PUSHR for split cmd/data writes */
    if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
        dspi->regmap_pushr = devm_regmap_init_mmio(
            &pdev->dev, base + SPI_PUSHR,
            &dspi_xspi_regmap_config[1]);
        if (IS_ERR(dspi->regmap_pushr)) {
            dev_err(&pdev->dev,
                "failed to init pushr regmap: %ld\n",
                PTR_ERR(dspi->regmap_pushr));
            ret = PTR_ERR(dspi->regmap_pushr);
            goto out_ctlr_put;
        }
    }

    dspi->clk = devm_clk_get(&pdev->dev, "dspi");
    if (IS_ERR(dspi->clk)) {
        ret = PTR_ERR(dspi->clk);
        dev_err(&pdev->dev, "unable to get clock\n");
        goto out_ctlr_put;
    }
    ret = clk_prepare_enable(dspi->clk);
    if (ret)
        goto out_ctlr_put;

    ret = dspi_init(dspi);
    if (ret)
        goto out_clk_put;

    /* No IRQ is not fatal — fall back to polled transfers */
    dspi->irq = platform_get_irq(pdev, 0);
    if (dspi->irq <= 0) {
        dev_info(&pdev->dev,
             "can't get platform irq, using poll mode\n");
        dspi->irq = 0;
        goto poll_mode;
    }

    init_completion(&dspi->xfer_done);

    ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
                   IRQF_SHARED, pdev->name, dspi);
    if (ret < 0) {
        dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
        goto out_clk_put;
    }

poll_mode:
    /* DMA channels are only needed for the DMA transfer mode */
    if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
        ret = dspi_request_dma(dspi, res->start);
        if (ret < 0) {
            dev_err(&pdev->dev, "can't get dma channels\n");
            goto out_free_irq;
        }
    }

    ctlr->max_speed_hz =
        clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

    /* PTP system timestamping is not supported on the DMA path */
    if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
        ctlr->ptp_sts_supported = true;

    ret = spi_register_controller(ctlr);
    if (ret != 0) {
        dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
        goto out_release_dma;
    }

    return ret;

out_release_dma:
    dspi_release_dma(dspi);
out_free_irq:
    if (dspi->irq)
        free_irq(dspi->irq, dspi);
out_clk_put:
    clk_disable_unprepare(dspi->clk);
out_ctlr_put:
    spi_controller_put(ctlr);

    return ret;
}
1396 
/*
 * Remove: unregister from the SPI core first (so no new transfers can
 * start), then halt the hardware and release DMA, IRQ and clock.
 */
static int dspi_remove(struct platform_device *pdev)
{
    struct fsl_dspi *dspi = platform_get_drvdata(pdev);

    /* Disconnect from the SPI framework */
    spi_unregister_controller(dspi->ctlr);

    /* Disable RX and TX */
    regmap_update_bits(dspi->regmap, SPI_MCR,
               SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
               SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);

    /* Stop Running */
    regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);

    dspi_release_dma(dspi);
    if (dspi->irq)
        free_irq(dspi->irq, dspi);
    clk_disable_unprepare(dspi->clk);

    return 0;
}
1419 
/* Shutdown shares the full teardown path with remove. */
static void dspi_shutdown(struct platform_device *pdev)
{
    dspi_remove(pdev);
}
1424 
1425 static struct platform_driver fsl_dspi_driver = {
1426     .driver.name        = DRIVER_NAME,
1427     .driver.of_match_table  = fsl_dspi_dt_ids,
1428     .driver.owner       = THIS_MODULE,
1429     .driver.pm      = &dspi_pm,
1430     .probe          = dspi_probe,
1431     .remove         = dspi_remove,
1432     .shutdown       = dspi_shutdown,
1433 };
1434 module_platform_driver(fsl_dspi_driver);
1435 
1436 MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
1437 MODULE_LICENSE("GPL");
1438 MODULE_ALIAS("platform:" DRIVER_NAME);