0001 // SPDX-License-Identifier: GPL-2.0+
0002 // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
0003 // Copyright (C) 2008 Juergen Beisert
0004 
0005 #include <linux/clk.h>
0006 #include <linux/completion.h>
0007 #include <linux/delay.h>
0008 #include <linux/dmaengine.h>
0009 #include <linux/dma-mapping.h>
0010 #include <linux/err.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/io.h>
0013 #include <linux/irq.h>
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/pinctrl/consumer.h>
0017 #include <linux/platform_device.h>
0018 #include <linux/pm_runtime.h>
0019 #include <linux/slab.h>
0020 #include <linux/spi/spi.h>
0021 #include <linux/types.h>
0022 #include <linux/of.h>
0023 #include <linux/of_device.h>
0024 #include <linux/property.h>
0025 
0026 #include <linux/dma/imx-dma.h>
0027 
0028 #define DRIVER_NAME "spi_imx"
0029 
0030 static bool use_dma = true;
0031 module_param(use_dma, bool, 0644);
0032 MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
0033 
0034 /* define polling limits */
0035 static unsigned int polling_limit_us = 30;
0036 module_param(polling_limit_us, uint, 0664);
0037 MODULE_PARM_DESC(polling_limit_us,
0038          "time in us to run a transfer in polling mode\n");
0039 
0040 #define MXC_RPM_TIMEOUT     2000 /* 2000ms */
0041 
0042 #define MXC_CSPIRXDATA      0x00
0043 #define MXC_CSPITXDATA      0x04
0044 #define MXC_CSPICTRL        0x08
0045 #define MXC_CSPIINT     0x0c
0046 #define MXC_RESET       0x1c
0047 
0048 /* generic defines to abstract from the different register layouts */
0049 #define MXC_INT_RR  (1 << 0) /* Receive data ready interrupt */
0050 #define MXC_INT_TE  (1 << 1) /* Transmit FIFO empty interrupt */
0051 #define MXC_INT_RDR BIT(4) /* Receive data threshold interrupt */
0052 
0053 /* The maximum number of bytes that an SDMA BD can transfer. */
0054 #define MAX_SDMA_BD_BYTES (1 << 15)
0055 #define MX51_ECSPI_CTRL_MAX_BURST   512
0056 /* The maximum number of bytes that IMX53_ECSPI can transfer in slave mode. */
0057 #define MX53_MAX_TRANSFER_BYTES     512
0058 
0059 enum spi_imx_devtype {
0060     IMX1_CSPI,
0061     IMX21_CSPI,
0062     IMX27_CSPI,
0063     IMX31_CSPI,
0064     IMX35_CSPI, /* CSPI on all i.mx except above */
0065     IMX51_ECSPI,    /* ECSPI on i.mx51 */
0066     IMX53_ECSPI,    /* ECSPI on i.mx53 and later */
0067 };
0068 
0069 struct spi_imx_data;
0070 
0071 struct spi_imx_devtype_data {
0072     void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
0073     int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
0074     int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
0075     void (*trigger)(struct spi_imx_data *spi_imx);
0076     int (*rx_available)(struct spi_imx_data *spi_imx);
0077     void (*reset)(struct spi_imx_data *spi_imx);
0078     void (*setup_wml)(struct spi_imx_data *spi_imx);
0079     void (*disable)(struct spi_imx_data *spi_imx);
0080     void (*disable_dma)(struct spi_imx_data *spi_imx);
0081     bool has_dmamode;
0082     bool has_slavemode;
0083     unsigned int fifo_size;
0084     bool dynamic_burst;
0085     /*
0086      * ERR009165 fixed or not:
0087      * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
0088      */
0089     bool tx_glitch_fixed;
0090     enum spi_imx_devtype devtype;
0091 };
0092 
0093 struct spi_imx_data {
0094     struct spi_controller *controller;
0095     struct device *dev;
0096 
0097     struct completion xfer_done;
0098     void __iomem *base;
0099     unsigned long base_phys;
0100 
0101     struct clk *clk_per;
0102     struct clk *clk_ipg;
0103     unsigned long spi_clk;
0104     unsigned int spi_bus_clk;
0105 
0106     unsigned int bits_per_word;
0107     unsigned int spi_drctl;
0108 
0109     unsigned int count, remainder;
0110     void (*tx)(struct spi_imx_data *spi_imx);
0111     void (*rx)(struct spi_imx_data *spi_imx);
0112     void *rx_buf;
0113     const void *tx_buf;
0114     unsigned int txfifo; /* number of words pushed in tx FIFO */
0115     unsigned int dynamic_burst;
0116     bool rx_only;
0117 
0118     /* Slave mode */
0119     bool slave_mode;
0120     bool slave_aborted;
0121     unsigned int slave_burst;
0122 
0123     /* DMA */
0124     bool usedma;
0125     u32 wml;
0126     struct completion dma_rx_completion;
0127     struct completion dma_tx_completion;
0128 
0129     const struct spi_imx_devtype_data *devtype_data;
0130 };
0131 
0132 static inline int is_imx27_cspi(struct spi_imx_data *d)
0133 {
0134     return d->devtype_data->devtype == IMX27_CSPI;
0135 }
0136 
0137 static inline int is_imx35_cspi(struct spi_imx_data *d)
0138 {
0139     return d->devtype_data->devtype == IMX35_CSPI;
0140 }
0141 
0142 static inline int is_imx51_ecspi(struct spi_imx_data *d)
0143 {
0144     return d->devtype_data->devtype == IMX51_ECSPI;
0145 }
0146 
0147 static inline int is_imx53_ecspi(struct spi_imx_data *d)
0148 {
0149     return d->devtype_data->devtype == IMX53_ECSPI;
0150 }
0151 
0152 #define MXC_SPI_BUF_RX(type)                        \
0153 static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)     \
0154 {                                   \
0155     unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);   \
0156                                     \
0157     if (spi_imx->rx_buf) {                      \
0158         *(type *)spi_imx->rx_buf = val;             \
0159         spi_imx->rx_buf += sizeof(type);            \
0160     }                               \
0161                                     \
0162     spi_imx->remainder -= sizeof(type);             \
0163 }
0164 
0165 #define MXC_SPI_BUF_TX(type)                        \
0166 static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)     \
0167 {                                   \
0168     type val = 0;                           \
0169                                     \
0170     if (spi_imx->tx_buf) {                      \
0171         val = *(type *)spi_imx->tx_buf;             \
0172         spi_imx->tx_buf += sizeof(type);            \
0173     }                               \
0174                                     \
0175     spi_imx->count -= sizeof(type);                 \
0176                                     \
0177     writel(val, spi_imx->base + MXC_CSPITXDATA);            \
0178 }
0179 
0180 MXC_SPI_BUF_RX(u8)
0181 MXC_SPI_BUF_TX(u8)
0182 MXC_SPI_BUF_RX(u16)
0183 MXC_SPI_BUF_TX(u16)
0184 MXC_SPI_BUF_RX(u32)
0185 MXC_SPI_BUF_TX(u32)
0186 
0187 /* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
0188  * (which is currently not the case in this driver)
0189  */
0190 static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
0191     256, 384, 512, 768, 1024};
0192 
0193 /* MX21, MX27 */
0194 static unsigned int spi_imx_clkdiv_1(unsigned int fin,
0195         unsigned int fspi, unsigned int max, unsigned int *fres)
0196 {
0197     int i;
0198 
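    /*
     * Illustrative example (not part of the original driver): with
     * fin = 48 MHz and fspi = 4 MHz the loop stops at i = 5, since
     * mxc_clkdivs[5] = 12 and 4 MHz * 12 >= 48 MHz, so *fres becomes
     * 48 MHz / 12 = 4 MHz and 5 is returned as the data-rate field.
     */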
0199     for (i = 2; i < max; i++)
0200         if (fspi * mxc_clkdivs[i] >= fin)
0201             break;
0202 
0203     *fres = fin / mxc_clkdivs[i];
0204     return i;
0205 }
0206 
0207 /* MX1, MX31, MX35, MX51 CSPI */
0208 static unsigned int spi_imx_clkdiv_2(unsigned int fin,
0209         unsigned int fspi, unsigned int *fres)
0210 {
0211     int i, div = 4;
0212 
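    /*
     * Illustrative example (not part of the original driver): with
     * fin = 48 MHz and fspi = 8 MHz the divider doubles from 4 to 8
     * (8 MHz * 8 >= 48 MHz), so *fres becomes 6 MHz and 1 is returned
     * as the data-rate field.
     */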
0213     for (i = 0; i < 7; i++) {
0214         if (fspi * div >= fin)
0215             goto out;
0216         div <<= 1;
0217     }
0218 
0219 out:
0220     *fres = fin / div;
0221     return i;
0222 }
0223 
0224 static int spi_imx_bytes_per_word(const int bits_per_word)
0225 {
0226     if (bits_per_word <= 8)
0227         return 1;
0228     else if (bits_per_word <= 16)
0229         return 2;
0230     else
0231         return 4;
0232 }
0233 
0234 static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
0235              struct spi_transfer *transfer)
0236 {
0237     struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
0238 
0239     if (!use_dma || controller->fallback)
0240         return false;
0241 
0242     if (!controller->dma_rx)
0243         return false;
0244 
0245     if (spi_imx->slave_mode)
0246         return false;
0247 
0248     if (transfer->len < spi_imx->devtype_data->fifo_size)
0249         return false;
0250 
0251     spi_imx->dynamic_burst = 0;
0252 
0253     return true;
0254 }
0255 
0256 #define MX51_ECSPI_CTRL     0x08
0257 #define MX51_ECSPI_CTRL_ENABLE      (1 <<  0)
0258 #define MX51_ECSPI_CTRL_XCH     (1 <<  2)
0259 #define MX51_ECSPI_CTRL_SMC     (1 << 3)
0260 #define MX51_ECSPI_CTRL_MODE_MASK   (0xf << 4)
0261 #define MX51_ECSPI_CTRL_DRCTL(drctl)    ((drctl) << 16)
0262 #define MX51_ECSPI_CTRL_POSTDIV_OFFSET  8
0263 #define MX51_ECSPI_CTRL_PREDIV_OFFSET   12
0264 #define MX51_ECSPI_CTRL_CS(cs)      ((cs) << 18)
0265 #define MX51_ECSPI_CTRL_BL_OFFSET   20
0266 #define MX51_ECSPI_CTRL_BL_MASK     (0xfff << 20)
0267 
0268 #define MX51_ECSPI_CONFIG   0x0c
0269 #define MX51_ECSPI_CONFIG_SCLKPHA(cs)   (1 << ((cs) +  0))
0270 #define MX51_ECSPI_CONFIG_SCLKPOL(cs)   (1 << ((cs) +  4))
0271 #define MX51_ECSPI_CONFIG_SBBCTRL(cs)   (1 << ((cs) +  8))
0272 #define MX51_ECSPI_CONFIG_SSBPOL(cs)    (1 << ((cs) + 12))
0273 #define MX51_ECSPI_CONFIG_SCLKCTL(cs)   (1 << ((cs) + 20))
0274 
0275 #define MX51_ECSPI_INT      0x10
0276 #define MX51_ECSPI_INT_TEEN     (1 <<  0)
0277 #define MX51_ECSPI_INT_RREN     (1 <<  3)
0278 #define MX51_ECSPI_INT_RDREN        (1 <<  4)
0279 
0280 #define MX51_ECSPI_DMA      0x14
0281 #define MX51_ECSPI_DMA_TX_WML(wml)  ((wml) & 0x3f)
0282 #define MX51_ECSPI_DMA_RX_WML(wml)  (((wml) & 0x3f) << 16)
0283 #define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
0284 
0285 #define MX51_ECSPI_DMA_TEDEN        (1 << 7)
0286 #define MX51_ECSPI_DMA_RXDEN        (1 << 23)
0287 #define MX51_ECSPI_DMA_RXTDEN       (1 << 31)
0288 
0289 #define MX51_ECSPI_STAT     0x18
0290 #define MX51_ECSPI_STAT_RR      (1 <<  3)
0291 
0292 #define MX51_ECSPI_TESTREG  0x20
0293 #define MX51_ECSPI_TESTREG_LBC  BIT(31)
0294 
0295 static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
0296 {
0297     unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
0298 
0299     if (spi_imx->rx_buf) {
0300 #ifdef __LITTLE_ENDIAN
0301         unsigned int bytes_per_word;
0302 
0303         bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
0304         if (bytes_per_word == 1)
0305             swab32s(&val);
0306         else if (bytes_per_word == 2)
0307             swahw32s(&val);
0308 #endif
0309         *(u32 *)spi_imx->rx_buf = val;
0310         spi_imx->rx_buf += sizeof(u32);
0311     }
0312 
0313     spi_imx->remainder -= sizeof(u32);
0314 }
0315 
0316 static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
0317 {
0318     int unaligned;
0319     u32 val;
0320 
0321     unaligned = spi_imx->remainder % 4;
0322 
0323     if (!unaligned) {
0324         spi_imx_buf_rx_swap_u32(spi_imx);
0325         return;
0326     }
0327 
0328     if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
0329         spi_imx_buf_rx_u16(spi_imx);
0330         return;
0331     }
0332 
0333     val = readl(spi_imx->base + MXC_CSPIRXDATA);
0334 
0335     while (unaligned--) {
0336         if (spi_imx->rx_buf) {
0337             *(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
0338             spi_imx->rx_buf++;
0339         }
0340         spi_imx->remainder--;
0341     }
0342 }
0343 
0344 static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
0345 {
0346     u32 val = 0;
0347 #ifdef __LITTLE_ENDIAN
0348     unsigned int bytes_per_word;
0349 #endif
0350 
0351     if (spi_imx->tx_buf) {
0352         val = *(u32 *)spi_imx->tx_buf;
0353         spi_imx->tx_buf += sizeof(u32);
0354     }
0355 
0356     spi_imx->count -= sizeof(u32);
0357 #ifdef __LITTLE_ENDIAN
0358     bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
0359 
0360     if (bytes_per_word == 1)
0361         swab32s(&val);
0362     else if (bytes_per_word == 2)
0363         swahw32s(&val);
0364 #endif
0365     writel(val, spi_imx->base + MXC_CSPITXDATA);
0366 }
0367 
0368 static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
0369 {
0370     int unaligned;
0371     u32 val = 0;
0372 
0373     unaligned = spi_imx->count % 4;
0374 
0375     if (!unaligned) {
0376         spi_imx_buf_tx_swap_u32(spi_imx);
0377         return;
0378     }
0379 
0380     if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
0381         spi_imx_buf_tx_u16(spi_imx);
0382         return;
0383     }
0384 
0385     while (unaligned--) {
0386         if (spi_imx->tx_buf) {
0387             val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
0388             spi_imx->tx_buf++;
0389         }
0390         spi_imx->count--;
0391     }
0392 
0393     writel(val, spi_imx->base + MXC_CSPITXDATA);
0394 }
0395 
0396 static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
0397 {
0398     u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
0399 
0400     if (spi_imx->rx_buf) {
0401         int n_bytes = spi_imx->slave_burst % sizeof(val);
0402 
0403         if (!n_bytes)
0404             n_bytes = sizeof(val);
0405 
0406         memcpy(spi_imx->rx_buf,
0407                ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
0408 
0409         spi_imx->rx_buf += n_bytes;
0410         spi_imx->slave_burst -= n_bytes;
0411     }
0412 
0413     spi_imx->remainder -= sizeof(u32);
0414 }
0415 
0416 static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
0417 {
0418     u32 val = 0;
0419     int n_bytes = spi_imx->count % sizeof(val);
0420 
0421     if (!n_bytes)
0422         n_bytes = sizeof(val);
0423 
0424     if (spi_imx->tx_buf) {
0425         memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
0426                spi_imx->tx_buf, n_bytes);
0427         val = cpu_to_be32(val);
0428         spi_imx->tx_buf += n_bytes;
0429     }
0430 
0431     spi_imx->count -= n_bytes;
0432 
0433     writel(val, spi_imx->base + MXC_CSPITXDATA);
0434 }
0435 
0436 /* MX51 eCSPI */
0437 static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
0438                       unsigned int fspi, unsigned int *fres)
0439 {
0440     /*
0441      * there are two 4-bit dividers, the pre-divider divides by
0442      * $pre, the post-divider by 2^$post
0443      */
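    /*
     * Illustrative example (not part of the original driver): for
     * fin = 60 MHz and fspi = 10 MHz, post = fls(fin) - fls(fspi) = 2,
     * bumped to 3 because 60 MHz > (10 MHz << 2), then reduced to 0 by
     * max(4, post) - 4 since the 4-bit pre-divider alone covers ratios
     * up to 16. pre = DIV_ROUND_UP(60 MHz, 10 MHz) - 1 = 5, giving
     * *fres = 60 MHz / 6 = 10 MHz.
     */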
0444     unsigned int pre, post;
0445     unsigned int fin = spi_imx->spi_clk;
0446 
0447     if (unlikely(fspi > fin))
0448         return 0;
0449 
0450     post = fls(fin) - fls(fspi);
0451     if (fin > fspi << post)
0452         post++;
0453 
0454     /* now we have: (fin <= fspi << post) with post being minimal */
0455 
0456     post = max(4U, post) - 4;
0457     if (unlikely(post > 0xf)) {
0458         dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
0459                 fspi, fin);
0460         return 0xff;
0461     }
0462 
0463     pre = DIV_ROUND_UP(fin, fspi << post) - 1;
0464 
0465     dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
0466             __func__, fin, fspi, post, pre);
0467 
0468     /* Resulting frequency for the SCLK line. */
0469     *fres = (fin / (pre + 1)) >> post;
0470 
0471     return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
0472         (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
0473 }
0474 
0475 static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
0476 {
0477     unsigned int val = 0;
0478 
0479     if (enable & MXC_INT_TE)
0480         val |= MX51_ECSPI_INT_TEEN;
0481 
0482     if (enable & MXC_INT_RR)
0483         val |= MX51_ECSPI_INT_RREN;
0484 
0485     if (enable & MXC_INT_RDR)
0486         val |= MX51_ECSPI_INT_RDREN;
0487 
0488     writel(val, spi_imx->base + MX51_ECSPI_INT);
0489 }
0490 
0491 static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
0492 {
0493     u32 reg;
0494 
0495     reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
0496     reg |= MX51_ECSPI_CTRL_XCH;
0497     writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
0498 }
0499 
0500 static void mx51_disable_dma(struct spi_imx_data *spi_imx)
0501 {
0502     writel(0, spi_imx->base + MX51_ECSPI_DMA);
0503 }
0504 
0505 static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
0506 {
0507     u32 ctrl;
0508 
0509     ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
0510     ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
0511     writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
0512 }
0513 
0514 static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
0515                       struct spi_message *msg)
0516 {
0517     struct spi_device *spi = msg->spi;
0518     struct spi_transfer *xfer;
0519     u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
0520     u32 min_speed_hz = ~0U;
0521     u32 testreg, delay;
0522     u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
0523     u32 current_cfg = cfg;
0524 
0525     /* set Master or Slave mode */
0526     if (spi_imx->slave_mode)
0527         ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
0528     else
0529         ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
0530 
0531     /*
0532      * Enable SPI_RDY handling (falling edge/level triggered).
0533      */
0534     if (spi->mode & SPI_READY)
0535         ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
0536 
0537     /* set chip select to use */
0538     ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
0539 
0540     /*
0541      * The ctrl register must be written first; unless the EN bit is
0542      * set, the other registers must not be written to.
0543      */
0544     writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
0545 
0546     testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
0547     if (spi->mode & SPI_LOOP)
0548         testreg |= MX51_ECSPI_TESTREG_LBC;
0549     else
0550         testreg &= ~MX51_ECSPI_TESTREG_LBC;
0551     writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
0552 
0553     /*
0554      * eCSPI burst completion by the Chip Select signal in Slave mode
0555      * is not functional on the i.MX53 SoC; configure the SPI burst to
0556      * complete when BURST_LENGTH + 1 bits have been received.
0557      */
0558     if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
0559         cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
0560     else
0561         cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
0562 
0563     if (spi->mode & SPI_CPOL) {
0564         cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
0565         cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
0566     } else {
0567         cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
0568         cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
0569     }
0570 
0571     if (spi->mode & SPI_CS_HIGH)
0572         cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
0573     else
0574         cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
0575 
0576     if (cfg == current_cfg)
0577         return 0;
0578 
0579     writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
0580 
0581     /*
0582      * Wait until the changes in the configuration register CONFIGREG
0583      * propagate into the hardware. It takes exactly one tick of the
0584      * SCLK clock, but we will wait two SCLK cycles just to be sure. The
0585      * effect of the delay it takes for the hardware to apply changes
0586      * is noticeable if the SCLK clock runs very slowly. In such a case, if
0587      * the polarity of SCLK should be inverted, the GPIO ChipSelect might
0588      * be asserted before the SCLK polarity changes, which would disrupt
0589      * the SPI communication as the device on the other end would consider
0590      * the change of SCLK polarity as a clock tick already.
0591      *
0592      * Because spi_imx->spi_bus_clk is only set in prepare_message
0593      * callback, iterate over all the transfers in spi_message, find the
0594      * one with lowest bus frequency, and use that bus frequency for the
0595      * delay calculation. In case all transfers have speed_hz == 0, then
0596      * min_speed_hz is ~0 and the resulting delay is zero.
0597      */
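    /*
     * Illustrative example (not part of the original driver): if the
     * slowest transfer in the message runs at 100 kHz, the delay below
     * is 2 * 1000000 / 100000 = 20 us and usleep_range() is used; at
     * 1 MHz it is only 2 us and udelay() is used instead.
     */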
0598     list_for_each_entry(xfer, &msg->transfers, transfer_list) {
0599         if (!xfer->speed_hz)
0600             continue;
0601         min_speed_hz = min(xfer->speed_hz, min_speed_hz);
0602     }
0603 
0604     delay = (2 * 1000000) / min_speed_hz;
0605     if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
0606         udelay(delay);
0607     else            /* SCLK is _very_ slow */
0608         usleep_range(delay, delay + 10);
0609 
0610     return 0;
0611 }
0612 
0613 static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
0614                 struct spi_device *spi)
0615 {
0616     bool cpha = (spi->mode & SPI_CPHA);
0617     bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
0618     u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
0619 
0620     /* Flip cpha logical value iff flip_cpha */
0621     cpha ^= flip_cpha;
0622 
0623     if (cpha)
0624         cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
0625     else
0626         cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
0627 
0628     writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
0629 }
0630 
0631 static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
0632                        struct spi_device *spi)
0633 {
0634     u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
0635     u32 clk;
0636 
0637     /* Clear BL field and set the right value */
0638     ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
0639     if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
0640         ctrl |= (spi_imx->slave_burst * 8 - 1)
0641             << MX51_ECSPI_CTRL_BL_OFFSET;
0642     else
0643         ctrl |= (spi_imx->bits_per_word - 1)
0644             << MX51_ECSPI_CTRL_BL_OFFSET;
0645 
0646     /* set clock speed */
0647     ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
0648           0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
0649     ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
0650     spi_imx->spi_bus_clk = clk;
0651 
0652     mx51_configure_cpha(spi_imx, spi);
0653 
0654     /*
0655      * ERR009165: work in XCH mode instead of SMC, as with PIO, on the
0656      * chips before i.MX6UL.
0657      */
0658     if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
0659         ctrl |= MX51_ECSPI_CTRL_SMC;
0660     else
0661         ctrl &= ~MX51_ECSPI_CTRL_SMC;
0662 
0663     writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
0664 
0665     return 0;
0666 }
0667 
0668 static void mx51_setup_wml(struct spi_imx_data *spi_imx)
0669 {
0670     u32 tx_wml = 0;
0671 
0672     if (spi_imx->devtype_data->tx_glitch_fixed)
0673         tx_wml = spi_imx->wml;
0674     /*
0675      * Configure the DMA register: set up the watermark
0676      * and enable the DMA requests.
0677      */
0678     writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
0679         MX51_ECSPI_DMA_TX_WML(tx_wml) |
0680         MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
0681         MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
0682         MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
0683 }
0684 
0685 static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
0686 {
0687     return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
0688 }
0689 
0690 static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
0691 {
0692     /* drain receive buffer */
0693     while (mx51_ecspi_rx_available(spi_imx))
0694         readl(spi_imx->base + MXC_CSPIRXDATA);
0695 }
0696 
0697 #define MX31_INTREG_TEEN    (1 << 0)
0698 #define MX31_INTREG_RREN    (1 << 3)
0699 
0700 #define MX31_CSPICTRL_ENABLE    (1 << 0)
0701 #define MX31_CSPICTRL_MASTER    (1 << 1)
0702 #define MX31_CSPICTRL_XCH   (1 << 2)
0703 #define MX31_CSPICTRL_SMC   (1 << 3)
0704 #define MX31_CSPICTRL_POL   (1 << 4)
0705 #define MX31_CSPICTRL_PHA   (1 << 5)
0706 #define MX31_CSPICTRL_SSCTL (1 << 6)
0707 #define MX31_CSPICTRL_SSPOL (1 << 7)
0708 #define MX31_CSPICTRL_BC_SHIFT  8
0709 #define MX35_CSPICTRL_BL_SHIFT  20
0710 #define MX31_CSPICTRL_CS_SHIFT  24
0711 #define MX35_CSPICTRL_CS_SHIFT  12
0712 #define MX31_CSPICTRL_DR_SHIFT  16
0713 
0714 #define MX31_CSPI_DMAREG    0x10
0715 #define MX31_DMAREG_RH_DEN  (1<<4)
0716 #define MX31_DMAREG_TH_DEN  (1<<1)
0717 
0718 #define MX31_CSPISTATUS     0x14
0719 #define MX31_STATUS_RR      (1 << 3)
0720 
0721 #define MX31_CSPI_TESTREG   0x1C
0722 #define MX31_TEST_LBC       (1 << 14)
0723 
0724 /* These functions also work for the i.MX35, but be aware that
0725  * the i.MX35 has a slightly different register layout for bits
0726  * we do not use here.
0727  */
0728 static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
0729 {
0730     unsigned int val = 0;
0731 
0732     if (enable & MXC_INT_TE)
0733         val |= MX31_INTREG_TEEN;
0734     if (enable & MXC_INT_RR)
0735         val |= MX31_INTREG_RREN;
0736 
0737     writel(val, spi_imx->base + MXC_CSPIINT);
0738 }
0739 
0740 static void mx31_trigger(struct spi_imx_data *spi_imx)
0741 {
0742     unsigned int reg;
0743 
0744     reg = readl(spi_imx->base + MXC_CSPICTRL);
0745     reg |= MX31_CSPICTRL_XCH;
0746     writel(reg, spi_imx->base + MXC_CSPICTRL);
0747 }
0748 
0749 static int mx31_prepare_message(struct spi_imx_data *spi_imx,
0750                 struct spi_message *msg)
0751 {
0752     return 0;
0753 }
0754 
0755 static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
0756                  struct spi_device *spi)
0757 {
0758     unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
0759     unsigned int clk;
0760 
0761     reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
0762         MX31_CSPICTRL_DR_SHIFT;
0763     spi_imx->spi_bus_clk = clk;
0764 
0765     if (is_imx35_cspi(spi_imx)) {
0766         reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
0767         reg |= MX31_CSPICTRL_SSCTL;
0768     } else {
0769         reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
0770     }
0771 
0772     if (spi->mode & SPI_CPHA)
0773         reg |= MX31_CSPICTRL_PHA;
0774     if (spi->mode & SPI_CPOL)
0775         reg |= MX31_CSPICTRL_POL;
0776     if (spi->mode & SPI_CS_HIGH)
0777         reg |= MX31_CSPICTRL_SSPOL;
0778     if (!spi->cs_gpiod)
0779         reg |= (spi->chip_select) <<
0780             (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
0781                           MX31_CSPICTRL_CS_SHIFT);
0782 
0783     if (spi_imx->usedma)
0784         reg |= MX31_CSPICTRL_SMC;
0785 
0786     writel(reg, spi_imx->base + MXC_CSPICTRL);
0787 
0788     reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
0789     if (spi->mode & SPI_LOOP)
0790         reg |= MX31_TEST_LBC;
0791     else
0792         reg &= ~MX31_TEST_LBC;
0793     writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
0794 
0795     if (spi_imx->usedma) {
0796         /*
0797          * configure DMA requests when RXFIFO is half full and
0798          * when TXFIFO is half empty
0799          */
0800         writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
0801             spi_imx->base + MX31_CSPI_DMAREG);
0802     }
0803 
0804     return 0;
0805 }
0806 
0807 static int mx31_rx_available(struct spi_imx_data *spi_imx)
0808 {
0809     return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
0810 }
0811 
0812 static void mx31_reset(struct spi_imx_data *spi_imx)
0813 {
0814     /* drain receive buffer */
0815     while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
0816         readl(spi_imx->base + MXC_CSPIRXDATA);
0817 }
0818 
0819 #define MX21_INTREG_RR      (1 << 4)
0820 #define MX21_INTREG_TEEN    (1 << 9)
0821 #define MX21_INTREG_RREN    (1 << 13)
0822 
0823 #define MX21_CSPICTRL_POL   (1 << 5)
0824 #define MX21_CSPICTRL_PHA   (1 << 6)
0825 #define MX21_CSPICTRL_SSPOL (1 << 8)
0826 #define MX21_CSPICTRL_XCH   (1 << 9)
0827 #define MX21_CSPICTRL_ENABLE    (1 << 10)
0828 #define MX21_CSPICTRL_MASTER    (1 << 11)
0829 #define MX21_CSPICTRL_DR_SHIFT  14
0830 #define MX21_CSPICTRL_CS_SHIFT  19
0831 
0832 static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
0833 {
0834     unsigned int val = 0;
0835 
0836     if (enable & MXC_INT_TE)
0837         val |= MX21_INTREG_TEEN;
0838     if (enable & MXC_INT_RR)
0839         val |= MX21_INTREG_RREN;
0840 
0841     writel(val, spi_imx->base + MXC_CSPIINT);
0842 }
0843 
0844 static void mx21_trigger(struct spi_imx_data *spi_imx)
0845 {
0846     unsigned int reg;
0847 
0848     reg = readl(spi_imx->base + MXC_CSPICTRL);
0849     reg |= MX21_CSPICTRL_XCH;
0850     writel(reg, spi_imx->base + MXC_CSPICTRL);
0851 }
0852 
0853 static int mx21_prepare_message(struct spi_imx_data *spi_imx,
0854                 struct spi_message *msg)
0855 {
0856     return 0;
0857 }
0858 
0859 static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
0860                  struct spi_device *spi)
0861 {
0862     unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
0863     unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
0864     unsigned int clk;
0865 
0866     reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
0867         << MX21_CSPICTRL_DR_SHIFT;
0868     spi_imx->spi_bus_clk = clk;
0869 
0870     reg |= spi_imx->bits_per_word - 1;
0871 
0872     if (spi->mode & SPI_CPHA)
0873         reg |= MX21_CSPICTRL_PHA;
0874     if (spi->mode & SPI_CPOL)
0875         reg |= MX21_CSPICTRL_POL;
0876     if (spi->mode & SPI_CS_HIGH)
0877         reg |= MX21_CSPICTRL_SSPOL;
0878     if (!spi->cs_gpiod)
0879         reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;
0880 
0881     writel(reg, spi_imx->base + MXC_CSPICTRL);
0882 
0883     return 0;
0884 }
0885 
0886 static int mx21_rx_available(struct spi_imx_data *spi_imx)
0887 {
0888     return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
0889 }
0890 
0891 static void mx21_reset(struct spi_imx_data *spi_imx)
0892 {
0893     writel(1, spi_imx->base + MXC_RESET);
0894 }
0895 
0896 #define MX1_INTREG_RR       (1 << 3)
0897 #define MX1_INTREG_TEEN     (1 << 8)
0898 #define MX1_INTREG_RREN     (1 << 11)
0899 
0900 #define MX1_CSPICTRL_POL    (1 << 4)
0901 #define MX1_CSPICTRL_PHA    (1 << 5)
0902 #define MX1_CSPICTRL_XCH    (1 << 8)
0903 #define MX1_CSPICTRL_ENABLE (1 << 9)
0904 #define MX1_CSPICTRL_MASTER (1 << 10)
0905 #define MX1_CSPICTRL_DR_SHIFT   13
0906 
0907 static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
0908 {
0909     unsigned int val = 0;
0910 
0911     if (enable & MXC_INT_TE)
0912         val |= MX1_INTREG_TEEN;
0913     if (enable & MXC_INT_RR)
0914         val |= MX1_INTREG_RREN;
0915 
0916     writel(val, spi_imx->base + MXC_CSPIINT);
0917 }
0918 
0919 static void mx1_trigger(struct spi_imx_data *spi_imx)
0920 {
0921     unsigned int reg;
0922 
0923     reg = readl(spi_imx->base + MXC_CSPICTRL);
0924     reg |= MX1_CSPICTRL_XCH;
0925     writel(reg, spi_imx->base + MXC_CSPICTRL);
0926 }
0927 
0928 static int mx1_prepare_message(struct spi_imx_data *spi_imx,
0929                    struct spi_message *msg)
0930 {
0931     return 0;
0932 }
0933 
0934 static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
0935                 struct spi_device *spi)
0936 {
0937     unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
0938     unsigned int clk;
0939 
0940     reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
0941         MX1_CSPICTRL_DR_SHIFT;
0942     spi_imx->spi_bus_clk = clk;
0943 
0944     reg |= spi_imx->bits_per_word - 1;
0945 
0946     if (spi->mode & SPI_CPHA)
0947         reg |= MX1_CSPICTRL_PHA;
0948     if (spi->mode & SPI_CPOL)
0949         reg |= MX1_CSPICTRL_POL;
0950 
0951     writel(reg, spi_imx->base + MXC_CSPICTRL);
0952 
0953     return 0;
0954 }
0955 
0956 static int mx1_rx_available(struct spi_imx_data *spi_imx)
0957 {
0958     return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
0959 }
0960 
0961 static void mx1_reset(struct spi_imx_data *spi_imx)
0962 {
0963     writel(1, spi_imx->base + MXC_RESET);
0964 }
0965 
0966 static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
0967     .intctrl = mx1_intctrl,
0968     .prepare_message = mx1_prepare_message,
0969     .prepare_transfer = mx1_prepare_transfer,
0970     .trigger = mx1_trigger,
0971     .rx_available = mx1_rx_available,
0972     .reset = mx1_reset,
0973     .fifo_size = 8,
0974     .has_dmamode = false,
0975     .dynamic_burst = false,
0976     .has_slavemode = false,
0977     .devtype = IMX1_CSPI,
0978 };
0979 
0980 static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
0981     .intctrl = mx21_intctrl,
0982     .prepare_message = mx21_prepare_message,
0983     .prepare_transfer = mx21_prepare_transfer,
0984     .trigger = mx21_trigger,
0985     .rx_available = mx21_rx_available,
0986     .reset = mx21_reset,
0987     .fifo_size = 8,
0988     .has_dmamode = false,
0989     .dynamic_burst = false,
0990     .has_slavemode = false,
0991     .devtype = IMX21_CSPI,
0992 };
0993 
0994 static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
0995     /* The i.MX27 CSPI shares its functions with the i.MX21 one */
0996     .intctrl = mx21_intctrl,
0997     .prepare_message = mx21_prepare_message,
0998     .prepare_transfer = mx21_prepare_transfer,
0999     .trigger = mx21_trigger,
1000     .rx_available = mx21_rx_available,
1001     .reset = mx21_reset,
1002     .fifo_size = 8,
1003     .has_dmamode = false,
1004     .dynamic_burst = false,
1005     .has_slavemode = false,
1006     .devtype = IMX27_CSPI,
1007 };
1008 
1009 static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
1010     .intctrl = mx31_intctrl,
1011     .prepare_message = mx31_prepare_message,
1012     .prepare_transfer = mx31_prepare_transfer,
1013     .trigger = mx31_trigger,
1014     .rx_available = mx31_rx_available,
1015     .reset = mx31_reset,
1016     .fifo_size = 8,
1017     .has_dmamode = false,
1018     .dynamic_burst = false,
1019     .has_slavemode = false,
1020     .devtype = IMX31_CSPI,
1021 };
1022 
1023 static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
1024     /* The i.MX35 and later CSPI share their functions with the i.MX31 one */
1025     .intctrl = mx31_intctrl,
1026     .prepare_message = mx31_prepare_message,
1027     .prepare_transfer = mx31_prepare_transfer,
1028     .trigger = mx31_trigger,
1029     .rx_available = mx31_rx_available,
1030     .reset = mx31_reset,
1031     .fifo_size = 8,
1032     .has_dmamode = true,
1033     .dynamic_burst = false,
1034     .has_slavemode = false,
1035     .devtype = IMX35_CSPI,
1036 };
1037 
1038 static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
1039     .intctrl = mx51_ecspi_intctrl,
1040     .prepare_message = mx51_ecspi_prepare_message,
1041     .prepare_transfer = mx51_ecspi_prepare_transfer,
1042     .trigger = mx51_ecspi_trigger,
1043     .rx_available = mx51_ecspi_rx_available,
1044     .reset = mx51_ecspi_reset,
1045     .setup_wml = mx51_setup_wml,
1046     .disable_dma = mx51_disable_dma,
1047     .fifo_size = 64,
1048     .has_dmamode = true,
1049     .dynamic_burst = true,
1050     .has_slavemode = true,
1051     .disable = mx51_ecspi_disable,
1052     .devtype = IMX51_ECSPI,
1053 };
1054 
1055 static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
1056     .intctrl = mx51_ecspi_intctrl,
1057     .prepare_message = mx51_ecspi_prepare_message,
1058     .prepare_transfer = mx51_ecspi_prepare_transfer,
1059     .trigger = mx51_ecspi_trigger,
1060     .rx_available = mx51_ecspi_rx_available,
1061     .disable_dma = mx51_disable_dma,
1062     .reset = mx51_ecspi_reset,
1063     .fifo_size = 64,
1064     .has_dmamode = true,
1065     .has_slavemode = true,
1066     .disable = mx51_ecspi_disable,
1067     .devtype = IMX53_ECSPI,
1068 };
1069 
1070 static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
1071     .intctrl = mx51_ecspi_intctrl,
1072     .prepare_message = mx51_ecspi_prepare_message,
1073     .prepare_transfer = mx51_ecspi_prepare_transfer,
1074     .trigger = mx51_ecspi_trigger,
1075     .rx_available = mx51_ecspi_rx_available,
1076     .reset = mx51_ecspi_reset,
1077     .setup_wml = mx51_setup_wml,
1078     .fifo_size = 64,
1079     .has_dmamode = true,
1080     .dynamic_burst = true,
1081     .has_slavemode = true,
1082     .tx_glitch_fixed = true,
1083     .disable = mx51_ecspi_disable,
1084     .devtype = IMX51_ECSPI,
1085 };
1086 
1087 static const struct of_device_id spi_imx_dt_ids[] = {
1088     { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
1089     { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
1090     { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
1091     { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
1092     { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
1093     { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
1094     { .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
1095     { .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
1096     { /* sentinel */ }
1097 };
1098 MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
1099 
1100 static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
1101 {
1102     u32 ctrl;
1103 
1104     ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
1105     ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
1106     ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
1107     writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
1108 }
1109 
1110 static void spi_imx_push(struct spi_imx_data *spi_imx)
1111 {
1112     unsigned int burst_len;
1113 
1114     /*
1115      * Reload the FIFO when the remaining bytes to be transferred in the
1116      * current burst is 0. This only applies when bits_per_word is a
1117      * multiple of 8.
1118      */
1119     if (!spi_imx->remainder) {
1120         if (spi_imx->dynamic_burst) {
1121 
1122             /* We need to deal unaligned data first */
1123             burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;
1124 
1125             if (!burst_len)
1126                 burst_len = MX51_ECSPI_CTRL_MAX_BURST;
1127 
1128             spi_imx_set_burst_len(spi_imx, burst_len * 8);
1129 
1130             spi_imx->remainder = burst_len;
1131         } else {
1132             spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
1133         }
1134     }
1135 
1136     while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
1137         if (!spi_imx->count)
1138             break;
1139         if (spi_imx->dynamic_burst &&
1140             spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
1141             break;
1142         spi_imx->tx(spi_imx);
1143         spi_imx->txfifo++;
1144     }
1145 
1146     if (!spi_imx->slave_mode)
1147         spi_imx->devtype_data->trigger(spi_imx);
1148 }
1149 
1150 static irqreturn_t spi_imx_isr(int irq, void *dev_id)
1151 {
1152     struct spi_imx_data *spi_imx = dev_id;
1153 
1154     while (spi_imx->txfifo &&
1155            spi_imx->devtype_data->rx_available(spi_imx)) {
1156         spi_imx->rx(spi_imx);
1157         spi_imx->txfifo--;
1158     }
1159 
1160     if (spi_imx->count) {
1161         spi_imx_push(spi_imx);
1162         return IRQ_HANDLED;
1163     }
1164 
1165     if (spi_imx->txfifo) {
1166         /* No data left to push, but still waiting for rx data,
1167          * enable receive data available interrupt.
1168          */
1169         spi_imx->devtype_data->intctrl(
1170                 spi_imx, MXC_INT_RR);
1171         return IRQ_HANDLED;
1172     }
1173 
1174     spi_imx->devtype_data->intctrl(spi_imx, 0);
1175     complete(&spi_imx->xfer_done);
1176 
1177     return IRQ_HANDLED;
1178 }
1179 
1180 static int spi_imx_dma_configure(struct spi_controller *controller)
1181 {
1182     int ret;
1183     enum dma_slave_buswidth buswidth;
1184     struct dma_slave_config rx = {}, tx = {};
1185     struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
1186 
1187     switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
1188     case 4:
1189         buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
1190         break;
1191     case 2:
1192         buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
1193         break;
1194     case 1:
1195         buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
1196         break;
1197     default:
1198         return -EINVAL;
1199     }
1200 
1201     tx.direction = DMA_MEM_TO_DEV;
1202     tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
1203     tx.dst_addr_width = buswidth;
1204     tx.dst_maxburst = spi_imx->wml;
1205     ret = dmaengine_slave_config(controller->dma_tx, &tx);
1206     if (ret) {
1207         dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
1208         return ret;
1209     }
1210 
1211     rx.direction = DMA_DEV_TO_MEM;
1212     rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
1213     rx.src_addr_width = buswidth;
1214     rx.src_maxburst = spi_imx->wml;
1215     ret = dmaengine_slave_config(controller->dma_rx, &rx);
1216     if (ret) {
1217         dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
1218         return ret;
1219     }
1220 
1221     return 0;
1222 }
1223 
1224 static int spi_imx_setupxfer(struct spi_device *spi,
1225                  struct spi_transfer *t)
1226 {
1227     struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
1228 
1229     if (!t)
1230         return 0;
1231 
1232     if (!t->speed_hz) {
1233         if (!spi->max_speed_hz) {
1234             dev_err(&spi->dev, "no speed_hz provided!\n");
1235             return -EINVAL;
1236         }
1237         dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
1238         spi_imx->spi_bus_clk = spi->max_speed_hz;
1239     } else
1240         spi_imx->spi_bus_clk = t->speed_hz;
1241 
1242     spi_imx->bits_per_word = t->bits_per_word;
1243 
1244     /*
1245      * Initialize the functions for transfer. To transfer non byte-aligned
1246      * words, we have to use multiple word-size bursts, we can't use
1247      * dynamic_burst in that case.
1248      */
1249     if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
1250         !(spi->mode & SPI_CS_WORD) &&
1251         (spi_imx->bits_per_word == 8 ||
1252         spi_imx->bits_per_word == 16 ||
1253         spi_imx->bits_per_word == 32)) {
1254 
1255         spi_imx->rx = spi_imx_buf_rx_swap;
1256         spi_imx->tx = spi_imx_buf_tx_swap;
1257         spi_imx->dynamic_burst = 1;
1258 
1259     } else {
1260         if (spi_imx->bits_per_word <= 8) {
1261             spi_imx->rx = spi_imx_buf_rx_u8;
1262             spi_imx->tx = spi_imx_buf_tx_u8;
1263         } else if (spi_imx->bits_per_word <= 16) {
1264             spi_imx->rx = spi_imx_buf_rx_u16;
1265             spi_imx->tx = spi_imx_buf_tx_u16;
1266         } else {
1267             spi_imx->rx = spi_imx_buf_rx_u32;
1268             spi_imx->tx = spi_imx_buf_tx_u32;
1269         }
1270         spi_imx->dynamic_burst = 0;
1271     }
1272 
1273     if (spi_imx_can_dma(spi_imx->controller, spi, t))
1274         spi_imx->usedma = true;
1275     else
1276         spi_imx->usedma = false;
1277 
1278     spi_imx->rx_only = ((t->tx_buf == NULL)
1279             || (t->tx_buf == spi->controller->dummy_tx));
1280 
1281     if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
1282         spi_imx->rx = mx53_ecspi_rx_slave;
1283         spi_imx->tx = mx53_ecspi_tx_slave;
1284         spi_imx->slave_burst = t->len;
1285     }
1286 
1287     spi_imx->devtype_data->prepare_transfer(spi_imx, spi);
1288 
1289     return 0;
1290 }
1291 
1292 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
1293 {
1294     struct spi_controller *controller = spi_imx->controller;
1295 
1296     if (controller->dma_rx) {
1297         dma_release_channel(controller->dma_rx);
1298         controller->dma_rx = NULL;
1299     }
1300 
1301     if (controller->dma_tx) {
1302         dma_release_channel(controller->dma_tx);
1303         controller->dma_tx = NULL;
1304     }
1305 }
1306 
1307 static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
1308                  struct spi_controller *controller)
1309 {
1310     int ret;
1311 
1312     spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
1313 
1314     /* Prepare for TX DMA: */
1315     controller->dma_tx = dma_request_chan(dev, "tx");
1316     if (IS_ERR(controller->dma_tx)) {
1317         ret = PTR_ERR(controller->dma_tx);
1318         dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
1319         controller->dma_tx = NULL;
1320         goto err;
1321     }
1322 
1323     /* Prepare for RX : */
1324     controller->dma_rx = dma_request_chan(dev, "rx");
1325     if (IS_ERR(controller->dma_rx)) {
1326         ret = PTR_ERR(controller->dma_rx);
1327         dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
1328         controller->dma_rx = NULL;
1329         goto err;
1330     }
1331 
1332     init_completion(&spi_imx->dma_rx_completion);
1333     init_completion(&spi_imx->dma_tx_completion);
1334     controller->can_dma = spi_imx_can_dma;
1335     controller->max_dma_len = MAX_SDMA_BD_BYTES;
1336     spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
1337                      SPI_CONTROLLER_MUST_TX;
1338 
1339     return 0;
1340 err:
1341     spi_imx_sdma_exit(spi_imx);
1342     return ret;
1343 }
1344 
1345 static void spi_imx_dma_rx_callback(void *cookie)
1346 {
1347     struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1348 
1349     complete(&spi_imx->dma_rx_completion);
1350 }
1351 
1352 static void spi_imx_dma_tx_callback(void *cookie)
1353 {
1354     struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1355 
1356     complete(&spi_imx->dma_tx_completion);
1357 }
1358 
1359 static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
1360 {
1361     unsigned long timeout = 0;
1362 
1363     /* Time with actual data transfer and CS change delay related to HW */
1364     timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
1365 
1366     /* Add extra second for scheduler related activities */
1367     timeout += 1;
1368 
1369     /* Double calculated timeout */
1370     return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
1371 }
1372 
1373 static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
1374                 struct spi_transfer *transfer)
1375 {
1376     struct dma_async_tx_descriptor *desc_tx, *desc_rx;
1377     unsigned long transfer_timeout;
1378     unsigned long timeout;
1379     struct spi_controller *controller = spi_imx->controller;
1380     struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
1381     struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
1382     unsigned int bytes_per_word, i;
1383     int ret;
1384 
1385     /* Get the right burst length from the last sg to ensure no tail data */
1386     bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
1387     for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
1388         if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
1389             break;
1390     }
1391     /* Use 1 as wml in case no available burst length got */
1392     if (i == 0)
1393         i = 1;
1394 
1395     spi_imx->wml =  i;
1396 
1397     ret = spi_imx_dma_configure(controller);
1398     if (ret)
1399         goto dma_failure_no_start;
1400 
1401     if (!spi_imx->devtype_data->setup_wml) {
1402         dev_err(spi_imx->dev, "No setup_wml()?\n");
1403         ret = -EINVAL;
1404         goto dma_failure_no_start;
1405     }
1406     spi_imx->devtype_data->setup_wml(spi_imx);
1407 
1408     /*
1409      * The TX DMA setup starts the transfer, so make sure RX is configured
1410      * before TX.
1411      */
1412     desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
1413                 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
1414                 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1415     if (!desc_rx) {
1416         ret = -EINVAL;
1417         goto dma_failure_no_start;
1418     }
1419 
1420     desc_rx->callback = spi_imx_dma_rx_callback;
1421     desc_rx->callback_param = (void *)spi_imx;
1422     dmaengine_submit(desc_rx);
1423     reinit_completion(&spi_imx->dma_rx_completion);
1424     dma_async_issue_pending(controller->dma_rx);
1425 
1426     desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
1427                 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
1428                 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1429     if (!desc_tx) {
1430         dmaengine_terminate_all(controller->dma_tx);
1431         dmaengine_terminate_all(controller->dma_rx);
1432         return -EINVAL;
1433     }
1434 
1435     desc_tx->callback = spi_imx_dma_tx_callback;
1436     desc_tx->callback_param = (void *)spi_imx;
1437     dmaengine_submit(desc_tx);
1438     reinit_completion(&spi_imx->dma_tx_completion);
1439     dma_async_issue_pending(controller->dma_tx);
1440 
1441     transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
1442 
1443     /* Wait SDMA to finish the data transfer.*/
1444     timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
1445                         transfer_timeout);
1446     if (!timeout) {
1447         dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
1448         dmaengine_terminate_all(controller->dma_tx);
1449         dmaengine_terminate_all(controller->dma_rx);
1450         return -ETIMEDOUT;
1451     }
1452 
1453     timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
1454                           transfer_timeout);
1455     if (!timeout) {
1456         dev_err(&controller->dev, "I/O Error in DMA RX\n");
1457         spi_imx->devtype_data->reset(spi_imx);
1458         dmaengine_terminate_all(controller->dma_rx);
1459         return -ETIMEDOUT;
1460     }
1461 
1462     return 0;
1463 /* fallback to pio */
1464 dma_failure_no_start:
1465     transfer->error |= SPI_TRANS_FAIL_NO_START;
1466     return ret;
1467 }
1468 
1469 static int spi_imx_pio_transfer(struct spi_device *spi,
1470                 struct spi_transfer *transfer)
1471 {
1472     struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
1473     unsigned long transfer_timeout;
1474     unsigned long timeout;
1475 
1476     spi_imx->tx_buf = transfer->tx_buf;
1477     spi_imx->rx_buf = transfer->rx_buf;
1478     spi_imx->count = transfer->len;
1479     spi_imx->txfifo = 0;
1480     spi_imx->remainder = 0;
1481 
1482     reinit_completion(&spi_imx->xfer_done);
1483 
1484     spi_imx_push(spi_imx);
1485 
1486     spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
1487 
1488     transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
1489 
1490     timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
1491                           transfer_timeout);
1492     if (!timeout) {
1493         dev_err(&spi->dev, "I/O Error in PIO\n");
1494         spi_imx->devtype_data->reset(spi_imx);
1495         return -ETIMEDOUT;
1496     }
1497 
1498     return 0;
1499 }
1500 
1501 static int spi_imx_poll_transfer(struct spi_device *spi,
1502                  struct spi_transfer *transfer)
1503 {
1504     struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
1505     unsigned long timeout;
1506 
1507     spi_imx->tx_buf = transfer->tx_buf;
1508     spi_imx->rx_buf = transfer->rx_buf;
1509     spi_imx->count = transfer->len;
1510     spi_imx->txfifo = 0;
1511     spi_imx->remainder = 0;
1512 
1513     /* fill in the fifo before timeout calculations if we are
1514      * interrupted here, then the data is getting transferred by
1515      * the HW while we are interrupted
1516      */
1517     spi_imx_push(spi_imx);
1518 
1519     timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
1520     while (spi_imx->txfifo) {
1521         /* RX */
1522         while (spi_imx->txfifo &&
1523                spi_imx->devtype_data->rx_available(spi_imx)) {
1524             spi_imx->rx(spi_imx);
1525             spi_imx->txfifo--;
1526         }
1527 
1528         /* TX */
1529         if (spi_imx->count) {
1530             spi_imx_push(spi_imx);
1531             continue;
1532         }
1533 
1534         if (spi_imx->txfifo &&
1535             time_after(jiffies, timeout)) {
1536 
1537             dev_err_ratelimited(&spi->dev,
1538                         "timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
1539                         jiffies - timeout);
1540 
1541             /* fall back to interrupt mode */
1542             return spi_imx_pio_transfer(spi, transfer);
1543         }
1544     }
1545 
1546     return 0;
1547 }
1548 
1549 static int spi_imx_pio_transfer_slave(struct spi_device *spi,
1550                       struct spi_transfer *transfer)
1551 {
1552     struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
1553     int ret = 0;
1554 
1555     if (is_imx53_ecspi(spi_imx) &&
1556         transfer->len > MX53_MAX_TRANSFER_BYTES) {
1557         dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
1558             MX53_MAX_TRANSFER_BYTES);
1559         return -EMSGSIZE;
1560     }
1561 
1562     spi_imx->tx_buf = transfer->tx_buf;
1563     spi_imx->rx_buf = transfer->rx_buf;
1564     spi_imx->count = transfer->len;
1565     spi_imx->txfifo = 0;
1566     spi_imx->remainder = 0;
1567 
1568     reinit_completion(&spi_imx->xfer_done);
1569     spi_imx->slave_aborted = false;
1570 
1571     spi_imx_push(spi_imx);
1572 
1573     spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
1574 
1575     if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
1576         spi_imx->slave_aborted) {
1577         dev_dbg(&spi->dev, "interrupted\n");
1578         ret = -EINTR;
1579     }
1580 
1581     /* ecspi has a HW issue when works in Slave mode,
1582      * after 64 words writtern to TXFIFO, even TXFIFO becomes empty,
1583      * ECSPI_TXDATA keeps shift out the last word data,
1584      * so we have to disable ECSPI when in slave mode after the
1585      * transfer completes
1586      */
1587     if (spi_imx->devtype_data->disable)
1588         spi_imx->devtype_data->disable(spi_imx);
1589 
1590     return ret;
1591 }
1592 
1593 static int spi_imx_transfer_one(struct spi_controller *controller,
1594                 struct spi_device *spi,
1595                 struct spi_transfer *transfer)
1596 {
1597     struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
1598     unsigned long hz_per_byte, byte_limit;
1599 
1600     spi_imx_setupxfer(spi, transfer);
1601     transfer->effective_speed_hz = spi_imx->spi_bus_clk;
1602 
1603     /* flush rxfifo before transfer */
1604     while (spi_imx->devtype_data->rx_available(spi_imx))
1605         readl(spi_imx->base + MXC_CSPIRXDATA);
1606 
1607     if (spi_imx->slave_mode)
1608         return spi_imx_pio_transfer_slave(spi, transfer);
1609 
1610     /*
1611      * Calculate the estimated time in us the transfer runs. Find
1612      * the number of Hz per byte per polling limit.
1613      */
1614     hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0;
1615     byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1;
1616 
1617     /* run in polling mode for short transfers */
1618     if (transfer->len < byte_limit)
1619         return spi_imx_poll_transfer(spi, transfer);
1620 
1621     if (spi_imx->usedma)
1622         return spi_imx_dma_transfer(spi_imx, transfer);
1623 
1624     return spi_imx_pio_transfer(spi, transfer);
1625 }
1626 
1627 static int spi_imx_setup(struct spi_device *spi)
1628 {
1629     dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
1630          spi->mode, spi->bits_per_word, spi->max_speed_hz);
1631 
1632     return 0;
1633 }
1634 
1635 static void spi_imx_cleanup(struct spi_device *spi)
1636 {
1637 }
1638 
1639 static int
1640 spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
1641 {
1642     struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
1643     int ret;
1644 
1645     ret = pm_runtime_resume_and_get(spi_imx->dev);
1646     if (ret < 0) {
1647         dev_err(spi_imx->dev, "failed to enable clock\n");
1648         return ret;
1649     }
1650 
1651     ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
1652     if (ret) {
1653         pm_runtime_mark_last_busy(spi_imx->dev);
1654         pm_runtime_put_autosuspend(spi_imx->dev);
1655     }
1656 
1657     return ret;
1658 }
1659 
1660 static int
1661 spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
1662 {
1663     struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
1664 
1665     pm_runtime_mark_last_busy(spi_imx->dev);
1666     pm_runtime_put_autosuspend(spi_imx->dev);
1667     return 0;
1668 }
1669 
1670 static int spi_imx_slave_abort(struct spi_controller *controller)
1671 {
1672     struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
1673 
1674     spi_imx->slave_aborted = true;
1675     complete(&spi_imx->xfer_done);
1676 
1677     return 0;
1678 }
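     /*
      * Editor's note: this is the controller's ->slave_abort hook. A slave
      * protocol driver can call spi_slave_abort() to end a transfer that the
      * remote master never completes; the completion signalled here wakes
      * spi_imx_pio_transfer_slave(), which then returns -EINTR.
      */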
1679 
1680 static int spi_imx_probe(struct platform_device *pdev)
1681 {
1682     struct device_node *np = pdev->dev.of_node;
1683     struct spi_controller *controller;
1684     struct spi_imx_data *spi_imx;
1685     struct resource *res;
1686     int ret, irq, spi_drctl;
1687     const struct spi_imx_devtype_data *devtype_data =
1688             of_device_get_match_data(&pdev->dev);
1689     bool slave_mode;
1690     u32 val;
1691 
1692     slave_mode = devtype_data->has_slavemode &&
1693             of_property_read_bool(np, "spi-slave");
1694     if (slave_mode)
1695         controller = spi_alloc_slave(&pdev->dev,
1696                          sizeof(struct spi_imx_data));
1697     else
1698         controller = spi_alloc_master(&pdev->dev,
1699                           sizeof(struct spi_imx_data));
1700     if (!controller)
1701         return -ENOMEM;
1702 
1703     ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
1704     if ((ret < 0) || (spi_drctl >= 0x3)) {
1705         /* '11' is reserved */
1706         spi_drctl = 0;
1707     }
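         /*
          * Editor's note, based on my reading of the ECSPI DRCTL field and
          * the fsl,spi-rdy-drctl binding (please verify): 0 ignores the
          * SPI_RDY signal, 1 triggers a burst on an SPI_RDY edge and 2 on
          * its level; 3 is reserved, which is why values >= 0x3 fall back
          * to 0 above. The SPI_READY mode bit must also be set for DRCTL
          * to take effect.
          */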
1708 
1709     platform_set_drvdata(pdev, controller);
1710 
1711     controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
1712     controller->bus_num = np ? -1 : pdev->id;
1713     controller->use_gpio_descriptors = true;
1714 
1715     spi_imx = spi_controller_get_devdata(controller);
1716     spi_imx->controller = controller;
1717     spi_imx->dev = &pdev->dev;
1718     spi_imx->slave_mode = slave_mode;
1719 
1720     spi_imx->devtype_data = devtype_data;
1721 
1722     /*
1723      * Get the number of chip selects from the device properties. This
1724      * can come from the device tree or from board files; if it is not
1725      * defined, a default of 3 chip selects is used, as all the legacy
1726      * board files have <= 3 chip selects.
1727      */
1728     if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
1729         controller->num_chipselect = val;
1730     else
1731         controller->num_chipselect = 3;
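         /*
          * Hypothetical device tree fragment (editor's illustration only;
          * the node label and GPIO numbers are made up):
          *
          *   &ecspi2 {
          *           num-cs = <2>;
          *           cs-gpios = <&gpio2 26 GPIO_ACTIVE_LOW>,
          *                      <&gpio2 27 GPIO_ACTIVE_LOW>;
          *           status = "okay";
          *   };
          */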
1732 
1733     spi_imx->controller->transfer_one = spi_imx_transfer_one;
1734     spi_imx->controller->setup = spi_imx_setup;
1735     spi_imx->controller->cleanup = spi_imx_cleanup;
1736     spi_imx->controller->prepare_message = spi_imx_prepare_message;
1737     spi_imx->controller->unprepare_message = spi_imx_unprepare_message;
1738     spi_imx->controller->slave_abort = spi_imx_slave_abort;
1739     spi_imx->controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS;
1740 
1741     if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
1742         is_imx53_ecspi(spi_imx))
1743         spi_imx->controller->mode_bits |= SPI_LOOP | SPI_READY;
1744 
1745     if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
1746         spi_imx->controller->mode_bits |= SPI_RX_CPHA_FLIP;
1747 
1748     if (is_imx51_ecspi(spi_imx) &&
1749         device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
1750         /*
1751          * When using a hardware chip select, SPI_CS_WORD can be
1752          * implemented by simply setting the burst length to the word
1753          * size, which is considerably faster than toggling CS manually.
1754          */
1755         spi_imx->controller->mode_bits |= SPI_CS_WORD;
1756 
1757     spi_imx->spi_drctl = spi_drctl;
1758 
1759     init_completion(&spi_imx->xfer_done);
1760 
1761     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1762     spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
1763     if (IS_ERR(spi_imx->base)) {
1764         ret = PTR_ERR(spi_imx->base);
1765         goto out_controller_put;
1766     }
1767     spi_imx->base_phys = res->start;
1768 
1769     irq = platform_get_irq(pdev, 0);
1770     if (irq < 0) {
1771         ret = irq;
1772         goto out_controller_put;
1773     }
1774 
1775     ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
1776                    dev_name(&pdev->dev), spi_imx);
1777     if (ret) {
1778         dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
1779         goto out_controller_put;
1780     }
1781 
1782     spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1783     if (IS_ERR(spi_imx->clk_ipg)) {
1784         ret = PTR_ERR(spi_imx->clk_ipg);
1785         goto out_controller_put;
1786     }
1787 
1788     spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
1789     if (IS_ERR(spi_imx->clk_per)) {
1790         ret = PTR_ERR(spi_imx->clk_per);
1791         goto out_controller_put;
1792     }
1793 
1794     ret = clk_prepare_enable(spi_imx->clk_per);
1795     if (ret)
1796         goto out_controller_put;
1797 
1798     ret = clk_prepare_enable(spi_imx->clk_ipg);
1799     if (ret)
1800         goto out_put_per;
1801 
1802     pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
1803     pm_runtime_use_autosuspend(spi_imx->dev);
1804     pm_runtime_get_noresume(spi_imx->dev);
1805     pm_runtime_set_active(spi_imx->dev);
1806     pm_runtime_enable(spi_imx->dev);
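         /*
          * Editor's note: the clocks were enabled by hand above, so the
          * device is marked active and a usage reference is taken before
          * runtime PM is enabled; this keeps the controller from being
          * suspended while the rest of probe still accesses its registers.
          * The reference is dropped again just before probe returns.
          */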
1807 
1808     spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
1809     /*
1810      * DMA is only validated on i.MX35 and i.MX6 so far; this constraint
1811      * can be removed once it has been validated on other chips.
1812      */
1813     if (spi_imx->devtype_data->has_dmamode) {
1814         ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
1815         if (ret == -EPROBE_DEFER)
1816             goto out_runtime_pm_put;
1817 
1818         if (ret < 0)
1819             dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
1820                 ret);
1821     }
1822 
1823     spi_imx->devtype_data->reset(spi_imx);
1824 
1825     spi_imx->devtype_data->intctrl(spi_imx, 0);
1826 
1827     controller->dev.of_node = pdev->dev.of_node;
1828     ret = spi_register_controller(controller);
1829     if (ret) {
1830         dev_err_probe(&pdev->dev, ret, "register controller failed\n");
1831         goto out_register_controller;
1832     }
1833 
1834     pm_runtime_mark_last_busy(spi_imx->dev);
1835     pm_runtime_put_autosuspend(spi_imx->dev);
1836 
1837     return ret;
1838 
1839 out_register_controller:
1840     if (spi_imx->devtype_data->has_dmamode)
1841         spi_imx_sdma_exit(spi_imx);
1842 out_runtime_pm_put:
1843     pm_runtime_dont_use_autosuspend(spi_imx->dev);
1844     pm_runtime_set_suspended(&pdev->dev);
1845     pm_runtime_disable(spi_imx->dev);
1846 
1847     clk_disable_unprepare(spi_imx->clk_ipg);
1848 out_put_per:
1849     clk_disable_unprepare(spi_imx->clk_per);
1850 out_controller_put:
1851     spi_controller_put(controller);
1852 
1853     return ret;
1854 }
1855 
1856 static int spi_imx_remove(struct platform_device *pdev)
1857 {
1858     struct spi_controller *controller = platform_get_drvdata(pdev);
1859     struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
1860     int ret;
1861 
1862     spi_unregister_controller(controller);
1863 
1864     ret = pm_runtime_resume_and_get(spi_imx->dev);
1865     if (ret < 0) {
1866         dev_err(spi_imx->dev, "failed to enable clock\n");
1867         return ret;
1868     }
1869 
1870     writel(0, spi_imx->base + MXC_CSPICTRL);
1871 
1872     pm_runtime_dont_use_autosuspend(spi_imx->dev);
1873     pm_runtime_put_sync(spi_imx->dev);
1874     pm_runtime_disable(spi_imx->dev);
1875 
1876     spi_imx_sdma_exit(spi_imx);
1877 
1878     return 0;
1879 }
1880 
1881 static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
1882 {
1883     struct spi_controller *controller = dev_get_drvdata(dev);
1884     struct spi_imx_data *spi_imx;
1885     int ret;
1886 
1887     spi_imx = spi_controller_get_devdata(controller);
1888 
1889     ret = clk_prepare_enable(spi_imx->clk_per);
1890     if (ret)
1891         return ret;
1892 
1893     ret = clk_prepare_enable(spi_imx->clk_ipg);
1894     if (ret) {
1895         clk_disable_unprepare(spi_imx->clk_per);
1896         return ret;
1897     }
1898 
1899     return 0;
1900 }
1901 
1902 static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
1903 {
1904     struct spi_controller *controller = dev_get_drvdata(dev);
1905     struct spi_imx_data *spi_imx;
1906 
1907     spi_imx = spi_controller_get_devdata(controller);
1908 
1909     clk_disable_unprepare(spi_imx->clk_per);
1910     clk_disable_unprepare(spi_imx->clk_ipg);
1911 
1912     return 0;
1913 }
1914 
1915 static int __maybe_unused spi_imx_suspend(struct device *dev)
1916 {
1917     pinctrl_pm_select_sleep_state(dev);
1918     return 0;
1919 }
1920 
1921 static int __maybe_unused spi_imx_resume(struct device *dev)
1922 {
1923     pinctrl_pm_select_default_state(dev);
1924     return 0;
1925 }
1926 
1927 static const struct dev_pm_ops imx_spi_pm = {
1928     SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
1929                 spi_imx_runtime_resume, NULL)
1930     SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
1931 };
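     /*
      * Editor's note: the runtime PM callbacks above gate the "per" and
      * "ipg" clocks, while the system sleep callbacks only switch the pins
      * between the pinctrl default and sleep states.
      */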
1932 
1933 static struct platform_driver spi_imx_driver = {
1934     .driver = {
1935            .name = DRIVER_NAME,
1936            .of_match_table = spi_imx_dt_ids,
1937            .pm = &imx_spi_pm,
1938     },
1939     .probe = spi_imx_probe,
1940     .remove = spi_imx_remove,
1941 };
1942 module_platform_driver(spi_imx_driver);
1943 
1944 MODULE_DESCRIPTION("i.MX SPI Controller driver");
1945 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1946 MODULE_LICENSE("GPL");
1947 MODULE_ALIAS("platform:" DRIVER_NAME);