Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Driver for Atmel AT32 and AT91 SPI Controllers
0004  *
0005  * Copyright (C) 2006 Atmel Corporation
0006  */
0007 
0008 #include <linux/kernel.h>
0009 #include <linux/clk.h>
0010 #include <linux/module.h>
0011 #include <linux/platform_device.h>
0012 #include <linux/delay.h>
0013 #include <linux/dma-mapping.h>
0014 #include <linux/dmaengine.h>
0015 #include <linux/err.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/spi/spi.h>
0018 #include <linux/slab.h>
0019 #include <linux/of.h>
0020 
0021 #include <linux/io.h>
0022 #include <linux/gpio/consumer.h>
0023 #include <linux/pinctrl/consumer.h>
0024 #include <linux/pm_runtime.h>
0025 #include <trace/events/spi.h>
0026 
0027 /* SPI register offsets */
0028 #define SPI_CR                  0x0000
0029 #define SPI_MR                  0x0004
0030 #define SPI_RDR                 0x0008
0031 #define SPI_TDR                 0x000c
0032 #define SPI_SR                  0x0010
0033 #define SPI_IER                 0x0014
0034 #define SPI_IDR                 0x0018
0035 #define SPI_IMR                 0x001c
0036 #define SPI_CSR0                0x0030
0037 #define SPI_CSR1                0x0034
0038 #define SPI_CSR2                0x0038
0039 #define SPI_CSR3                0x003c
0040 #define SPI_FMR                 0x0040
0041 #define SPI_FLR                 0x0044
0042 #define SPI_VERSION             0x00fc
0043 #define SPI_RPR                 0x0100
0044 #define SPI_RCR                 0x0104
0045 #define SPI_TPR                 0x0108
0046 #define SPI_TCR                 0x010c
0047 #define SPI_RNPR                0x0110
0048 #define SPI_RNCR                0x0114
0049 #define SPI_TNPR                0x0118
0050 #define SPI_TNCR                0x011c
0051 #define SPI_PTCR                0x0120
0052 #define SPI_PTSR                0x0124
0053 
0054 /* Bitfields in CR */
0055 #define SPI_SPIEN_OFFSET            0
0056 #define SPI_SPIEN_SIZE              1
0057 #define SPI_SPIDIS_OFFSET           1
0058 #define SPI_SPIDIS_SIZE             1
0059 #define SPI_SWRST_OFFSET            7
0060 #define SPI_SWRST_SIZE              1
0061 #define SPI_LASTXFER_OFFSET         24
0062 #define SPI_LASTXFER_SIZE           1
0063 #define SPI_TXFCLR_OFFSET           16
0064 #define SPI_TXFCLR_SIZE             1
0065 #define SPI_RXFCLR_OFFSET           17
0066 #define SPI_RXFCLR_SIZE             1
0067 #define SPI_FIFOEN_OFFSET           30
0068 #define SPI_FIFOEN_SIZE             1
0069 #define SPI_FIFODIS_OFFSET          31
0070 #define SPI_FIFODIS_SIZE            1
0071 
0072 /* Bitfields in MR */
0073 #define SPI_MSTR_OFFSET             0
0074 #define SPI_MSTR_SIZE               1
0075 #define SPI_PS_OFFSET               1
0076 #define SPI_PS_SIZE             1
0077 #define SPI_PCSDEC_OFFSET           2
0078 #define SPI_PCSDEC_SIZE             1
0079 #define SPI_FDIV_OFFSET             3
0080 #define SPI_FDIV_SIZE               1
0081 #define SPI_MODFDIS_OFFSET          4
0082 #define SPI_MODFDIS_SIZE            1
0083 #define SPI_WDRBT_OFFSET            5
0084 #define SPI_WDRBT_SIZE              1
0085 #define SPI_LLB_OFFSET              7
0086 #define SPI_LLB_SIZE                1
0087 #define SPI_PCS_OFFSET              16
0088 #define SPI_PCS_SIZE                4
0089 #define SPI_DLYBCS_OFFSET           24
0090 #define SPI_DLYBCS_SIZE             8
0091 
0092 /* Bitfields in RDR */
0093 #define SPI_RD_OFFSET               0
0094 #define SPI_RD_SIZE             16
0095 
0096 /* Bitfields in TDR */
0097 #define SPI_TD_OFFSET               0
0098 #define SPI_TD_SIZE             16
0099 
0100 /* Bitfields in SR */
0101 #define SPI_RDRF_OFFSET             0
0102 #define SPI_RDRF_SIZE               1
0103 #define SPI_TDRE_OFFSET             1
0104 #define SPI_TDRE_SIZE               1
0105 #define SPI_MODF_OFFSET             2
0106 #define SPI_MODF_SIZE               1
0107 #define SPI_OVRES_OFFSET            3
0108 #define SPI_OVRES_SIZE              1
0109 #define SPI_ENDRX_OFFSET            4
0110 #define SPI_ENDRX_SIZE              1
0111 #define SPI_ENDTX_OFFSET            5
0112 #define SPI_ENDTX_SIZE              1
0113 #define SPI_RXBUFF_OFFSET           6
0114 #define SPI_RXBUFF_SIZE             1
0115 #define SPI_TXBUFE_OFFSET           7
0116 #define SPI_TXBUFE_SIZE             1
0117 #define SPI_NSSR_OFFSET             8
0118 #define SPI_NSSR_SIZE               1
0119 #define SPI_TXEMPTY_OFFSET          9
0120 #define SPI_TXEMPTY_SIZE            1
0121 #define SPI_SPIENS_OFFSET           16
0122 #define SPI_SPIENS_SIZE             1
0123 #define SPI_TXFEF_OFFSET            24
0124 #define SPI_TXFEF_SIZE              1
0125 #define SPI_TXFFF_OFFSET            25
0126 #define SPI_TXFFF_SIZE              1
0127 #define SPI_TXFTHF_OFFSET           26
0128 #define SPI_TXFTHF_SIZE             1
0129 #define SPI_RXFEF_OFFSET            27
0130 #define SPI_RXFEF_SIZE              1
0131 #define SPI_RXFFF_OFFSET            28
0132 #define SPI_RXFFF_SIZE              1
0133 #define SPI_RXFTHF_OFFSET           29
0134 #define SPI_RXFTHF_SIZE             1
0135 #define SPI_TXFPTEF_OFFSET          30
0136 #define SPI_TXFPTEF_SIZE            1
0137 #define SPI_RXFPTEF_OFFSET          31
0138 #define SPI_RXFPTEF_SIZE            1
0139 
0140 /* Bitfields in CSR0 */
0141 #define SPI_CPOL_OFFSET             0
0142 #define SPI_CPOL_SIZE               1
0143 #define SPI_NCPHA_OFFSET            1
0144 #define SPI_NCPHA_SIZE              1
0145 #define SPI_CSAAT_OFFSET            3
0146 #define SPI_CSAAT_SIZE              1
0147 #define SPI_BITS_OFFSET             4
0148 #define SPI_BITS_SIZE               4
0149 #define SPI_SCBR_OFFSET             8
0150 #define SPI_SCBR_SIZE               8
0151 #define SPI_DLYBS_OFFSET            16
0152 #define SPI_DLYBS_SIZE              8
0153 #define SPI_DLYBCT_OFFSET           24
0154 #define SPI_DLYBCT_SIZE             8
0155 
0156 /* Bitfields in RCR */
0157 #define SPI_RXCTR_OFFSET            0
0158 #define SPI_RXCTR_SIZE              16
0159 
0160 /* Bitfields in TCR */
0161 #define SPI_TXCTR_OFFSET            0
0162 #define SPI_TXCTR_SIZE              16
0163 
0164 /* Bitfields in RNCR */
0165 #define SPI_RXNCR_OFFSET            0
0166 #define SPI_RXNCR_SIZE              16
0167 
0168 /* Bitfields in TNCR */
0169 #define SPI_TXNCR_OFFSET            0
0170 #define SPI_TXNCR_SIZE              16
0171 
0172 /* Bitfields in PTCR */
0173 #define SPI_RXTEN_OFFSET            0
0174 #define SPI_RXTEN_SIZE              1
0175 #define SPI_RXTDIS_OFFSET           1
0176 #define SPI_RXTDIS_SIZE             1
0177 #define SPI_TXTEN_OFFSET            8
0178 #define SPI_TXTEN_SIZE              1
0179 #define SPI_TXTDIS_OFFSET           9
0180 #define SPI_TXTDIS_SIZE             1
0181 
0182 /* Bitfields in FMR */
0183 #define SPI_TXRDYM_OFFSET           0
0184 #define SPI_TXRDYM_SIZE             2
0185 #define SPI_RXRDYM_OFFSET           4
0186 #define SPI_RXRDYM_SIZE             2
0187 #define SPI_TXFTHRES_OFFSET         16
0188 #define SPI_TXFTHRES_SIZE           6
0189 #define SPI_RXFTHRES_OFFSET         24
0190 #define SPI_RXFTHRES_SIZE           6
0191 
0192 /* Bitfields in FLR */
0193 #define SPI_TXFL_OFFSET             0
0194 #define SPI_TXFL_SIZE               6
0195 #define SPI_RXFL_OFFSET             16
0196 #define SPI_RXFL_SIZE               6
0197 
0198 /* Constants for BITS */
0199 #define SPI_BITS_8_BPT              0
0200 #define SPI_BITS_9_BPT              1
0201 #define SPI_BITS_10_BPT             2
0202 #define SPI_BITS_11_BPT             3
0203 #define SPI_BITS_12_BPT             4
0204 #define SPI_BITS_13_BPT             5
0205 #define SPI_BITS_14_BPT             6
0206 #define SPI_BITS_15_BPT             7
0207 #define SPI_BITS_16_BPT             8
0208 #define SPI_ONE_DATA                0
0209 #define SPI_TWO_DATA                1
0210 #define SPI_FOUR_DATA               2
0211 
0212 /* Bit manipulation macros */
0213 #define SPI_BIT(name) \
0214     (1 << SPI_##name##_OFFSET)
0215 #define SPI_BF(name, value) \
0216     (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
0217 #define SPI_BFEXT(name, value) \
0218     (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
0219 #define SPI_BFINS(name, value, old) \
0220     (((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
0221       | SPI_BF(name, value))
0222 
0223 /* Register access macros */
0224 #define spi_readl(port, reg) \
0225     readl_relaxed((port)->regs + SPI_##reg)
0226 #define spi_writel(port, reg, value) \
0227     writel_relaxed((value), (port)->regs + SPI_##reg)
0228 #define spi_writew(port, reg, value) \
0229     writew_relaxed((value), (port)->regs + SPI_##reg)
0230 
0231 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
0232  * cache operations; better heuristics consider wordsize and bitrate.
0233  */
0234 #define DMA_MIN_BYTES   16
0235 
0236 #define SPI_DMA_TIMEOUT     (msecs_to_jiffies(1000))
0237 
0238 #define AUTOSUSPEND_TIMEOUT 2000
0239 
0240 struct atmel_spi_caps {
0241     bool    is_spi2;
0242     bool    has_wdrbt;
0243     bool    has_dma_support;
0244     bool    has_pdc_support;
0245 };
0246 
0247 /*
0248  * The core SPI transfer engine just talks to a register bank to set up
0249  * DMA transfers; transfer queue progress is driven by IRQs.  The clock
0250  * framework provides the base clock, subdivided for each spi_device.
0251  */
0252 struct atmel_spi {
0253     spinlock_t      lock;
0254     unsigned long       flags;
0255 
0256     phys_addr_t     phybase;
0257     void __iomem        *regs;
0258     int         irq;
0259     struct clk      *clk;
0260     struct platform_device  *pdev;
0261     unsigned long       spi_clk;
0262 
0263     struct spi_transfer *current_transfer;
0264     int         current_remaining_bytes;
0265     int         done_status;
0266     dma_addr_t      dma_addr_rx_bbuf;
0267     dma_addr_t      dma_addr_tx_bbuf;
0268     void            *addr_rx_bbuf;
0269     void            *addr_tx_bbuf;
0270 
0271     struct completion   xfer_completion;
0272 
0273     struct atmel_spi_caps   caps;
0274 
0275     bool            use_dma;
0276     bool            use_pdc;
0277 
0278     bool            keep_cs;
0279 
0280     u32         fifo_size;
0281     u8          native_cs_free;
0282     u8          native_cs_for_gpio;
0283 };
0284 
0285 /* Controller-specific per-slave state */
0286 struct atmel_spi_device {
0287     u32         csr;
0288 };
0289 
0290 #define SPI_MAX_DMA_XFER    65535 /* true for both PDC and DMA */
0291 #define INVALID_DMA_ADDRESS 0xffffffff
0292 
0293 /*
0294  * Version 2 of the SPI controller has
0295  *  - CR.LASTXFER
0296  *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
0297  *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
0298  *  - SPI_CSRx.CSAAT
0299  *  - SPI_CSRx.SBCR allows faster clocking
0300  */
0301 static bool atmel_spi_is_v2(struct atmel_spi *as)
0302 {
0303     return as->caps.is_spi2;
0304 }
0305 
0306 /*
0307  * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
0308  * they assume that spi slave device state will not change on deselect, so
0309  * that automagic deselection is OK.  ("NPCSx rises if no data is to be
0310  * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
0311  * controllers have CSAAT and friends.
0312  *
0313  * Even controller newer than ar91rm9200, using GPIOs can make sens as
0314  * it lets us support active-high chipselects despite the controller's
0315  * belief that only active-low devices/systems exists.
0316  *
0317  * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
0318  * right when driven with GPIO.  ("Mode Fault does not allow more than one
0319  * Master on Chip Select 0.")  No workaround exists for that ... so for
0320  * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
0321  * and (c) will trigger that first erratum in some cases.
0322  */
0323 
0324 static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
0325 {
0326     struct atmel_spi_device *asd = spi->controller_state;
0327     int chip_select;
0328     u32 mr;
0329 
0330     if (spi->cs_gpiod)
0331         chip_select = as->native_cs_for_gpio;
0332     else
0333         chip_select = spi->chip_select;
0334 
0335     if (atmel_spi_is_v2(as)) {
0336         spi_writel(as, CSR0 + 4 * chip_select, asd->csr);
0337         /* For the low SPI version, there is a issue that PDC transfer
0338          * on CS1,2,3 needs SPI_CSR0.BITS config as SPI_CSR1,2,3.BITS
0339          */
0340         spi_writel(as, CSR0, asd->csr);
0341         if (as->caps.has_wdrbt) {
0342             spi_writel(as, MR,
0343                     SPI_BF(PCS, ~(0x01 << chip_select))
0344                     | SPI_BIT(WDRBT)
0345                     | SPI_BIT(MODFDIS)
0346                     | SPI_BIT(MSTR));
0347         } else {
0348             spi_writel(as, MR,
0349                     SPI_BF(PCS, ~(0x01 << chip_select))
0350                     | SPI_BIT(MODFDIS)
0351                     | SPI_BIT(MSTR));
0352         }
0353 
0354         mr = spi_readl(as, MR);
0355     } else {
0356         u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
0357         int i;
0358         u32 csr;
0359 
0360         /* Make sure clock polarity is correct */
0361         for (i = 0; i < spi->master->num_chipselect; i++) {
0362             csr = spi_readl(as, CSR0 + 4 * i);
0363             if ((csr ^ cpol) & SPI_BIT(CPOL))
0364                 spi_writel(as, CSR0 + 4 * i,
0365                         csr ^ SPI_BIT(CPOL));
0366         }
0367 
0368         mr = spi_readl(as, MR);
0369         mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
0370         spi_writel(as, MR, mr);
0371     }
0372 
0373     dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
0374 }
0375 
0376 static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
0377 {
0378     int chip_select;
0379     u32 mr;
0380 
0381     if (spi->cs_gpiod)
0382         chip_select = as->native_cs_for_gpio;
0383     else
0384         chip_select = spi->chip_select;
0385 
0386     /* only deactivate *this* device; sometimes transfers to
0387      * another device may be active when this routine is called.
0388      */
0389     mr = spi_readl(as, MR);
0390     if (~SPI_BFEXT(PCS, mr) & (1 << chip_select)) {
0391         mr = SPI_BFINS(PCS, 0xf, mr);
0392         spi_writel(as, MR, mr);
0393     }
0394 
0395     dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
0396 
0397     if (!spi->cs_gpiod)
0398         spi_writel(as, CR, SPI_BIT(LASTXFER));
0399 }
0400 
0401 static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
0402 {
0403     spin_lock_irqsave(&as->lock, as->flags);
0404 }
0405 
0406 static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
0407 {
0408     spin_unlock_irqrestore(&as->lock, as->flags);
0409 }
0410 
0411 static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
0412 {
0413     return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
0414 }
0415 
0416 static inline bool atmel_spi_use_dma(struct atmel_spi *as,
0417                 struct spi_transfer *xfer)
0418 {
0419     return as->use_dma && xfer->len >= DMA_MIN_BYTES;
0420 }
0421 
0422 static bool atmel_spi_can_dma(struct spi_master *master,
0423                   struct spi_device *spi,
0424                   struct spi_transfer *xfer)
0425 {
0426     struct atmel_spi *as = spi_master_get_devdata(master);
0427 
0428     if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
0429         return atmel_spi_use_dma(as, xfer) &&
0430             !atmel_spi_is_vmalloc_xfer(xfer);
0431     else
0432         return atmel_spi_use_dma(as, xfer);
0433 
0434 }
0435 
0436 static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
0437 {
0438     struct spi_master *master = platform_get_drvdata(as->pdev);
0439     struct dma_slave_config slave_config;
0440     int err = 0;
0441 
0442     if (bits_per_word > 8) {
0443         slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
0444         slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
0445     } else {
0446         slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
0447         slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
0448     }
0449 
0450     slave_config.dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
0451     slave_config.src_addr = (dma_addr_t)as->phybase + SPI_RDR;
0452     slave_config.src_maxburst = 1;
0453     slave_config.dst_maxburst = 1;
0454     slave_config.device_fc = false;
0455 
0456     /*
0457      * This driver uses fixed peripheral select mode (PS bit set to '0' in
0458      * the Mode Register).
0459      * So according to the datasheet, when FIFOs are available (and
0460      * enabled), the Transmit FIFO operates in Multiple Data Mode.
0461      * In this mode, up to 2 data, not 4, can be written into the Transmit
0462      * Data Register in a single access.
0463      * However, the first data has to be written into the lowest 16 bits and
0464      * the second data into the highest 16 bits of the Transmit
0465      * Data Register. For 8bit data (the most frequent case), it would
0466      * require to rework tx_buf so each data would actually fit 16 bits.
0467      * So we'd rather write only one data at the time. Hence the transmit
0468      * path works the same whether FIFOs are available (and enabled) or not.
0469      */
0470     if (dmaengine_slave_config(master->dma_tx, &slave_config)) {
0471         dev_err(&as->pdev->dev,
0472             "failed to configure tx dma channel\n");
0473         err = -EINVAL;
0474     }
0475 
0476     /*
0477      * This driver configures the spi controller for master mode (MSTR bit
0478      * set to '1' in the Mode Register).
0479      * So according to the datasheet, when FIFOs are available (and
0480      * enabled), the Receive FIFO operates in Single Data Mode.
0481      * So the receive path works the same whether FIFOs are available (and
0482      * enabled) or not.
0483      */
0484     if (dmaengine_slave_config(master->dma_rx, &slave_config)) {
0485         dev_err(&as->pdev->dev,
0486             "failed to configure rx dma channel\n");
0487         err = -EINVAL;
0488     }
0489 
0490     return err;
0491 }
0492 
0493 static int atmel_spi_configure_dma(struct spi_master *master,
0494                    struct atmel_spi *as)
0495 {
0496     struct device *dev = &as->pdev->dev;
0497     int err;
0498 
0499     master->dma_tx = dma_request_chan(dev, "tx");
0500     if (IS_ERR(master->dma_tx)) {
0501         err = PTR_ERR(master->dma_tx);
0502         dev_dbg(dev, "No TX DMA channel, DMA is disabled\n");
0503         goto error_clear;
0504     }
0505 
0506     master->dma_rx = dma_request_chan(dev, "rx");
0507     if (IS_ERR(master->dma_rx)) {
0508         err = PTR_ERR(master->dma_rx);
0509         /*
0510          * No reason to check EPROBE_DEFER here since we have already
0511          * requested tx channel.
0512          */
0513         dev_dbg(dev, "No RX DMA channel, DMA is disabled\n");
0514         goto error;
0515     }
0516 
0517     err = atmel_spi_dma_slave_config(as, 8);
0518     if (err)
0519         goto error;
0520 
0521     dev_info(&as->pdev->dev,
0522             "Using %s (tx) and %s (rx) for DMA transfers\n",
0523             dma_chan_name(master->dma_tx),
0524             dma_chan_name(master->dma_rx));
0525 
0526     return 0;
0527 error:
0528     if (!IS_ERR(master->dma_rx))
0529         dma_release_channel(master->dma_rx);
0530     if (!IS_ERR(master->dma_tx))
0531         dma_release_channel(master->dma_tx);
0532 error_clear:
0533     master->dma_tx = master->dma_rx = NULL;
0534     return err;
0535 }
0536 
0537 static void atmel_spi_stop_dma(struct spi_master *master)
0538 {
0539     if (master->dma_rx)
0540         dmaengine_terminate_all(master->dma_rx);
0541     if (master->dma_tx)
0542         dmaengine_terminate_all(master->dma_tx);
0543 }
0544 
0545 static void atmel_spi_release_dma(struct spi_master *master)
0546 {
0547     if (master->dma_rx) {
0548         dma_release_channel(master->dma_rx);
0549         master->dma_rx = NULL;
0550     }
0551     if (master->dma_tx) {
0552         dma_release_channel(master->dma_tx);
0553         master->dma_tx = NULL;
0554     }
0555 }
0556 
0557 /* This function is called by the DMA driver from tasklet context */
0558 static void dma_callback(void *data)
0559 {
0560     struct spi_master   *master = data;
0561     struct atmel_spi    *as = spi_master_get_devdata(master);
0562 
0563     if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
0564         IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
0565         memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
0566                as->current_transfer->len);
0567     }
0568     complete(&as->xfer_completion);
0569 }
0570 
0571 /*
0572  * Next transfer using PIO without FIFO.
0573  */
0574 static void atmel_spi_next_xfer_single(struct spi_master *master,
0575                        struct spi_transfer *xfer)
0576 {
0577     struct atmel_spi    *as = spi_master_get_devdata(master);
0578     unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
0579 
0580     dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
0581 
0582     /* Make sure data is not remaining in RDR */
0583     spi_readl(as, RDR);
0584     while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
0585         spi_readl(as, RDR);
0586         cpu_relax();
0587     }
0588 
0589     if (xfer->bits_per_word > 8)
0590         spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
0591     else
0592         spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
0593 
0594     dev_dbg(master->dev.parent,
0595         "  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
0596         xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
0597         xfer->bits_per_word);
0598 
0599     /* Enable relevant interrupts */
0600     spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
0601 }
0602 
0603 /*
0604  * Next transfer using PIO with FIFO.
0605  */
0606 static void atmel_spi_next_xfer_fifo(struct spi_master *master,
0607                      struct spi_transfer *xfer)
0608 {
0609     struct atmel_spi *as = spi_master_get_devdata(master);
0610     u32 current_remaining_data, num_data;
0611     u32 offset = xfer->len - as->current_remaining_bytes;
0612     const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
0613     const u8  *bytes = (const u8  *)((u8 *)xfer->tx_buf + offset);
0614     u16 td0, td1;
0615     u32 fifomr;
0616 
0617     dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_fifo\n");
0618 
0619     /* Compute the number of data to transfer in the current iteration */
0620     current_remaining_data = ((xfer->bits_per_word > 8) ?
0621                   ((u32)as->current_remaining_bytes >> 1) :
0622                   (u32)as->current_remaining_bytes);
0623     num_data = min(current_remaining_data, as->fifo_size);
0624 
0625     /* Flush RX and TX FIFOs */
0626     spi_writel(as, CR, SPI_BIT(RXFCLR) | SPI_BIT(TXFCLR));
0627     while (spi_readl(as, FLR))
0628         cpu_relax();
0629 
0630     /* Set RX FIFO Threshold to the number of data to transfer */
0631     fifomr = spi_readl(as, FMR);
0632     spi_writel(as, FMR, SPI_BFINS(RXFTHRES, num_data, fifomr));
0633 
0634     /* Clear FIFO flags in the Status Register, especially RXFTHF */
0635     (void)spi_readl(as, SR);
0636 
0637     /* Fill TX FIFO */
0638     while (num_data >= 2) {
0639         if (xfer->bits_per_word > 8) {
0640             td0 = *words++;
0641             td1 = *words++;
0642         } else {
0643             td0 = *bytes++;
0644             td1 = *bytes++;
0645         }
0646 
0647         spi_writel(as, TDR, (td1 << 16) | td0);
0648         num_data -= 2;
0649     }
0650 
0651     if (num_data) {
0652         if (xfer->bits_per_word > 8)
0653             td0 = *words++;
0654         else
0655             td0 = *bytes++;
0656 
0657         spi_writew(as, TDR, td0);
0658         num_data--;
0659     }
0660 
0661     dev_dbg(master->dev.parent,
0662         "  start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
0663         xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
0664         xfer->bits_per_word);
0665 
0666     /*
0667      * Enable RX FIFO Threshold Flag interrupt to be notified about
0668      * transfer completion.
0669      */
0670     spi_writel(as, IER, SPI_BIT(RXFTHF) | SPI_BIT(OVRES));
0671 }
0672 
0673 /*
0674  * Next transfer using PIO.
0675  */
0676 static void atmel_spi_next_xfer_pio(struct spi_master *master,
0677                     struct spi_transfer *xfer)
0678 {
0679     struct atmel_spi *as = spi_master_get_devdata(master);
0680 
0681     if (as->fifo_size)
0682         atmel_spi_next_xfer_fifo(master, xfer);
0683     else
0684         atmel_spi_next_xfer_single(master, xfer);
0685 }
0686 
0687 /*
0688  * Submit next transfer for DMA.
0689  */
0690 static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
0691                 struct spi_transfer *xfer,
0692                 u32 *plen)
0693 {
0694     struct atmel_spi    *as = spi_master_get_devdata(master);
0695     struct dma_chan     *rxchan = master->dma_rx;
0696     struct dma_chan     *txchan = master->dma_tx;
0697     struct dma_async_tx_descriptor *rxdesc;
0698     struct dma_async_tx_descriptor *txdesc;
0699     dma_cookie_t        cookie;
0700 
0701     dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
0702 
0703     /* Check that the channels are available */
0704     if (!rxchan || !txchan)
0705         return -ENODEV;
0706 
0707 
0708     *plen = xfer->len;
0709 
0710     if (atmel_spi_dma_slave_config(as, xfer->bits_per_word))
0711         goto err_exit;
0712 
0713     /* Send both scatterlists */
0714     if (atmel_spi_is_vmalloc_xfer(xfer) &&
0715         IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
0716         rxdesc = dmaengine_prep_slave_single(rxchan,
0717                              as->dma_addr_rx_bbuf,
0718                              xfer->len,
0719                              DMA_DEV_TO_MEM,
0720                              DMA_PREP_INTERRUPT |
0721                              DMA_CTRL_ACK);
0722     } else {
0723         rxdesc = dmaengine_prep_slave_sg(rxchan,
0724                          xfer->rx_sg.sgl,
0725                          xfer->rx_sg.nents,
0726                          DMA_DEV_TO_MEM,
0727                          DMA_PREP_INTERRUPT |
0728                          DMA_CTRL_ACK);
0729     }
0730     if (!rxdesc)
0731         goto err_dma;
0732 
0733     if (atmel_spi_is_vmalloc_xfer(xfer) &&
0734         IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
0735         memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
0736         txdesc = dmaengine_prep_slave_single(txchan,
0737                              as->dma_addr_tx_bbuf,
0738                              xfer->len, DMA_MEM_TO_DEV,
0739                              DMA_PREP_INTERRUPT |
0740                              DMA_CTRL_ACK);
0741     } else {
0742         txdesc = dmaengine_prep_slave_sg(txchan,
0743                          xfer->tx_sg.sgl,
0744                          xfer->tx_sg.nents,
0745                          DMA_MEM_TO_DEV,
0746                          DMA_PREP_INTERRUPT |
0747                          DMA_CTRL_ACK);
0748     }
0749     if (!txdesc)
0750         goto err_dma;
0751 
0752     dev_dbg(master->dev.parent,
0753         "  start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
0754         xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
0755         xfer->rx_buf, (unsigned long long)xfer->rx_dma);
0756 
0757     /* Enable relevant interrupts */
0758     spi_writel(as, IER, SPI_BIT(OVRES));
0759 
0760     /* Put the callback on the RX transfer only, that should finish last */
0761     rxdesc->callback = dma_callback;
0762     rxdesc->callback_param = master;
0763 
0764     /* Submit and fire RX and TX with TX last so we're ready to read! */
0765     cookie = rxdesc->tx_submit(rxdesc);
0766     if (dma_submit_error(cookie))
0767         goto err_dma;
0768     cookie = txdesc->tx_submit(txdesc);
0769     if (dma_submit_error(cookie))
0770         goto err_dma;
0771     rxchan->device->device_issue_pending(rxchan);
0772     txchan->device->device_issue_pending(txchan);
0773 
0774     return 0;
0775 
0776 err_dma:
0777     spi_writel(as, IDR, SPI_BIT(OVRES));
0778     atmel_spi_stop_dma(master);
0779 err_exit:
0780     return -ENOMEM;
0781 }
0782 
0783 static void atmel_spi_next_xfer_data(struct spi_master *master,
0784                 struct spi_transfer *xfer,
0785                 dma_addr_t *tx_dma,
0786                 dma_addr_t *rx_dma,
0787                 u32 *plen)
0788 {
0789     *rx_dma = xfer->rx_dma + xfer->len - *plen;
0790     *tx_dma = xfer->tx_dma + xfer->len - *plen;
0791     if (*plen > master->max_dma_len)
0792         *plen = master->max_dma_len;
0793 }
0794 
0795 static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
0796                     struct spi_device *spi,
0797                     struct spi_transfer *xfer)
0798 {
0799     u32         scbr, csr;
0800     unsigned long       bus_hz;
0801     int chip_select;
0802 
0803     if (spi->cs_gpiod)
0804         chip_select = as->native_cs_for_gpio;
0805     else
0806         chip_select = spi->chip_select;
0807 
0808     /* v1 chips start out at half the peripheral bus speed. */
0809     bus_hz = as->spi_clk;
0810     if (!atmel_spi_is_v2(as))
0811         bus_hz /= 2;
0812 
0813     /*
0814      * Calculate the lowest divider that satisfies the
0815      * constraint, assuming div32/fdiv/mbz == 0.
0816      */
0817     scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
0818 
0819     /*
0820      * If the resulting divider doesn't fit into the
0821      * register bitfield, we can't satisfy the constraint.
0822      */
0823     if (scbr >= (1 << SPI_SCBR_SIZE)) {
0824         dev_err(&spi->dev,
0825             "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
0826             xfer->speed_hz, scbr, bus_hz/255);
0827         return -EINVAL;
0828     }
0829     if (scbr == 0) {
0830         dev_err(&spi->dev,
0831             "setup: %d Hz too high, scbr %u; max %ld Hz\n",
0832             xfer->speed_hz, scbr, bus_hz);
0833         return -EINVAL;
0834     }
0835     csr = spi_readl(as, CSR0 + 4 * chip_select);
0836     csr = SPI_BFINS(SCBR, scbr, csr);
0837     spi_writel(as, CSR0 + 4 * chip_select, csr);
0838     xfer->effective_speed_hz = bus_hz / scbr;
0839 
0840     return 0;
0841 }
0842 
0843 /*
0844  * Submit next transfer for PDC.
0845  * lock is held, spi irq is blocked
0846  */
0847 static void atmel_spi_pdc_next_xfer(struct spi_master *master,
0848                     struct spi_transfer *xfer)
0849 {
0850     struct atmel_spi    *as = spi_master_get_devdata(master);
0851     u32         len;
0852     dma_addr_t      tx_dma, rx_dma;
0853 
0854     spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
0855 
0856     len = as->current_remaining_bytes;
0857     atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
0858     as->current_remaining_bytes -= len;
0859 
0860     spi_writel(as, RPR, rx_dma);
0861     spi_writel(as, TPR, tx_dma);
0862 
0863     if (xfer->bits_per_word > 8)
0864         len >>= 1;
0865     spi_writel(as, RCR, len);
0866     spi_writel(as, TCR, len);
0867 
0868     dev_dbg(&master->dev,
0869         "  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
0870         xfer, xfer->len, xfer->tx_buf,
0871         (unsigned long long)xfer->tx_dma, xfer->rx_buf,
0872         (unsigned long long)xfer->rx_dma);
0873 
0874     if (as->current_remaining_bytes) {
0875         len = as->current_remaining_bytes;
0876         atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
0877         as->current_remaining_bytes -= len;
0878 
0879         spi_writel(as, RNPR, rx_dma);
0880         spi_writel(as, TNPR, tx_dma);
0881 
0882         if (xfer->bits_per_word > 8)
0883             len >>= 1;
0884         spi_writel(as, RNCR, len);
0885         spi_writel(as, TNCR, len);
0886 
0887         dev_dbg(&master->dev,
0888             "  next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
0889             xfer, xfer->len, xfer->tx_buf,
0890             (unsigned long long)xfer->tx_dma, xfer->rx_buf,
0891             (unsigned long long)xfer->rx_dma);
0892     }
0893 
0894     /* REVISIT: We're waiting for RXBUFF before we start the next
0895      * transfer because we need to handle some difficult timing
0896      * issues otherwise. If we wait for TXBUFE in one transfer and
0897      * then starts waiting for RXBUFF in the next, it's difficult
0898      * to tell the difference between the RXBUFF interrupt we're
0899      * actually waiting for and the RXBUFF interrupt of the
0900      * previous transfer.
0901      *
0902      * It should be doable, though. Just not now...
0903      */
0904     spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
0905     spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
0906 }
0907 
0908 /*
0909  * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
0910  *  - The buffer is either valid for CPU access, else NULL
0911  *  - If the buffer is valid, so is its DMA address
0912  *
0913  * This driver manages the dma address unless message->is_dma_mapped.
0914  */
0915 static int
0916 atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
0917 {
0918     struct device   *dev = &as->pdev->dev;
0919 
0920     xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
0921     if (xfer->tx_buf) {
0922         /* tx_buf is a const void* where we need a void * for the dma
0923          * mapping */
0924         void *nonconst_tx = (void *)xfer->tx_buf;
0925 
0926         xfer->tx_dma = dma_map_single(dev,
0927                 nonconst_tx, xfer->len,
0928                 DMA_TO_DEVICE);
0929         if (dma_mapping_error(dev, xfer->tx_dma))
0930             return -ENOMEM;
0931     }
0932     if (xfer->rx_buf) {
0933         xfer->rx_dma = dma_map_single(dev,
0934                 xfer->rx_buf, xfer->len,
0935                 DMA_FROM_DEVICE);
0936         if (dma_mapping_error(dev, xfer->rx_dma)) {
0937             if (xfer->tx_buf)
0938                 dma_unmap_single(dev,
0939                         xfer->tx_dma, xfer->len,
0940                         DMA_TO_DEVICE);
0941             return -ENOMEM;
0942         }
0943     }
0944     return 0;
0945 }
0946 
0947 static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
0948                      struct spi_transfer *xfer)
0949 {
0950     if (xfer->tx_dma != INVALID_DMA_ADDRESS)
0951         dma_unmap_single(master->dev.parent, xfer->tx_dma,
0952                  xfer->len, DMA_TO_DEVICE);
0953     if (xfer->rx_dma != INVALID_DMA_ADDRESS)
0954         dma_unmap_single(master->dev.parent, xfer->rx_dma,
0955                  xfer->len, DMA_FROM_DEVICE);
0956 }
0957 
0958 static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
0959 {
0960     spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
0961 }
0962 
0963 static void
0964 atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
0965 {
0966     u8      *rxp;
0967     u16     *rxp16;
0968     unsigned long   xfer_pos = xfer->len - as->current_remaining_bytes;
0969 
0970     if (xfer->bits_per_word > 8) {
0971         rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
0972         *rxp16 = spi_readl(as, RDR);
0973     } else {
0974         rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
0975         *rxp = spi_readl(as, RDR);
0976     }
0977     if (xfer->bits_per_word > 8) {
0978         if (as->current_remaining_bytes > 2)
0979             as->current_remaining_bytes -= 2;
0980         else
0981             as->current_remaining_bytes = 0;
0982     } else {
0983         as->current_remaining_bytes--;
0984     }
0985 }
0986 
0987 static void
0988 atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
0989 {
0990     u32 fifolr = spi_readl(as, FLR);
0991     u32 num_bytes, num_data = SPI_BFEXT(RXFL, fifolr);
0992     u32 offset = xfer->len - as->current_remaining_bytes;
0993     u16 *words = (u16 *)((u8 *)xfer->rx_buf + offset);
0994     u8  *bytes = (u8  *)((u8 *)xfer->rx_buf + offset);
0995     u16 rd; /* RD field is the lowest 16 bits of RDR */
0996 
0997     /* Update the number of remaining bytes to transfer */
0998     num_bytes = ((xfer->bits_per_word > 8) ?
0999              (num_data << 1) :
1000              num_data);
1001 
1002     if (as->current_remaining_bytes > num_bytes)
1003         as->current_remaining_bytes -= num_bytes;
1004     else
1005         as->current_remaining_bytes = 0;
1006 
1007     /* Handle odd number of bytes when data are more than 8bit width */
1008     if (xfer->bits_per_word > 8)
1009         as->current_remaining_bytes &= ~0x1;
1010 
1011     /* Read data */
1012     while (num_data) {
1013         rd = spi_readl(as, RDR);
1014         if (xfer->bits_per_word > 8)
1015             *words++ = rd;
1016         else
1017             *bytes++ = rd;
1018         num_data--;
1019     }
1020 }
1021 
1022 /* Called from IRQ
1023  *
1024  * Must update "current_remaining_bytes" to keep track of data
1025  * to transfer.
1026  */
1027 static void
1028 atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
1029 {
1030     if (as->fifo_size)
1031         atmel_spi_pump_fifo_data(as, xfer);
1032     else
1033         atmel_spi_pump_single_data(as, xfer);
1034 }
1035 
1036 /* Interrupt
1037  *
1038  */
1039 static irqreturn_t
1040 atmel_spi_pio_interrupt(int irq, void *dev_id)
1041 {
1042     struct spi_master   *master = dev_id;
1043     struct atmel_spi    *as = spi_master_get_devdata(master);
1044     u32         status, pending, imr;
1045     struct spi_transfer *xfer;
1046     int         ret = IRQ_NONE;
1047 
1048     imr = spi_readl(as, IMR);
1049     status = spi_readl(as, SR);
1050     pending = status & imr;
1051 
1052     if (pending & SPI_BIT(OVRES)) {
1053         ret = IRQ_HANDLED;
1054         spi_writel(as, IDR, SPI_BIT(OVRES));
1055         dev_warn(master->dev.parent, "overrun\n");
1056 
1057         /*
1058          * When we get an overrun, we disregard the current
1059          * transfer. Data will not be copied back from any
1060          * bounce buffer and msg->actual_len will not be
1061          * updated with the last xfer.
1062          *
1063          * We will also not process any remaning transfers in
1064          * the message.
1065          */
1066         as->done_status = -EIO;
1067         smp_wmb();
1068 
1069         /* Clear any overrun happening while cleaning up */
1070         spi_readl(as, SR);
1071 
1072         complete(&as->xfer_completion);
1073 
1074     } else if (pending & (SPI_BIT(RDRF) | SPI_BIT(RXFTHF))) {
1075         atmel_spi_lock(as);
1076 
1077         if (as->current_remaining_bytes) {
1078             ret = IRQ_HANDLED;
1079             xfer = as->current_transfer;
1080             atmel_spi_pump_pio_data(as, xfer);
1081             if (!as->current_remaining_bytes)
1082                 spi_writel(as, IDR, pending);
1083 
1084             complete(&as->xfer_completion);
1085         }
1086 
1087         atmel_spi_unlock(as);
1088     } else {
1089         WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
1090         ret = IRQ_HANDLED;
1091         spi_writel(as, IDR, pending);
1092     }
1093 
1094     return ret;
1095 }
1096 
1097 static irqreturn_t
1098 atmel_spi_pdc_interrupt(int irq, void *dev_id)
1099 {
1100     struct spi_master   *master = dev_id;
1101     struct atmel_spi    *as = spi_master_get_devdata(master);
1102     u32         status, pending, imr;
1103     int         ret = IRQ_NONE;
1104 
1105     imr = spi_readl(as, IMR);
1106     status = spi_readl(as, SR);
1107     pending = status & imr;
1108 
1109     if (pending & SPI_BIT(OVRES)) {
1110 
1111         ret = IRQ_HANDLED;
1112 
1113         spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
1114                      | SPI_BIT(OVRES)));
1115 
1116         /* Clear any overrun happening while cleaning up */
1117         spi_readl(as, SR);
1118 
1119         as->done_status = -EIO;
1120 
1121         complete(&as->xfer_completion);
1122 
1123     } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
1124         ret = IRQ_HANDLED;
1125 
1126         spi_writel(as, IDR, pending);
1127 
1128         complete(&as->xfer_completion);
1129     }
1130 
1131     return ret;
1132 }
1133 
1134 static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
1135 {
1136     struct spi_delay *delay = &spi->word_delay;
1137     u32 value = delay->value;
1138 
1139     switch (delay->unit) {
1140     case SPI_DELAY_UNIT_NSECS:
1141         value /= 1000;
1142         break;
1143     case SPI_DELAY_UNIT_USECS:
1144         break;
1145     default:
1146         return -EINVAL;
1147     }
1148 
1149     return (as->spi_clk / 1000000 * value) >> 5;
1150 }
1151 
1152 static void initialize_native_cs_for_gpio(struct atmel_spi *as)
1153 {
1154     int i;
1155     struct spi_master *master = platform_get_drvdata(as->pdev);
1156 
1157     if (!as->native_cs_free)
1158         return; /* already initialized */
1159 
1160     if (!master->cs_gpiods)
1161         return; /* No CS GPIO */
1162 
1163     /*
1164      * On the first version of the controller (AT91RM9200), CS0
1165      * can't be used associated with GPIO
1166      */
1167     if (atmel_spi_is_v2(as))
1168         i = 0;
1169     else
1170         i = 1;
1171 
1172     for (; i < 4; i++)
1173         if (master->cs_gpiods[i])
1174             as->native_cs_free |= BIT(i);
1175 
1176     if (as->native_cs_free)
1177         as->native_cs_for_gpio = ffs(as->native_cs_free);
1178 }
1179 
1180 static int atmel_spi_setup(struct spi_device *spi)
1181 {
1182     struct atmel_spi    *as;
1183     struct atmel_spi_device *asd;
1184     u32         csr;
1185     unsigned int        bits = spi->bits_per_word;
1186     int chip_select;
1187     int         word_delay_csr;
1188 
1189     as = spi_master_get_devdata(spi->master);
1190 
1191     /* see notes above re chipselect */
1192     if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH)) {
1193         dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
1194         return -EINVAL;
1195     }
1196 
1197     /* Setup() is called during spi_register_controller(aka
1198      * spi_register_master) but after all membmers of the cs_gpiod
1199      * array have been filled, so we can looked for which native
1200      * CS will be free for using with GPIO
1201      */
1202     initialize_native_cs_for_gpio(as);
1203 
1204     if (spi->cs_gpiod && as->native_cs_free) {
1205         dev_err(&spi->dev,
1206             "No native CS available to support this GPIO CS\n");
1207         return -EBUSY;
1208     }
1209 
1210     if (spi->cs_gpiod)
1211         chip_select = as->native_cs_for_gpio;
1212     else
1213         chip_select = spi->chip_select;
1214 
1215     csr = SPI_BF(BITS, bits - 8);
1216     if (spi->mode & SPI_CPOL)
1217         csr |= SPI_BIT(CPOL);
1218     if (!(spi->mode & SPI_CPHA))
1219         csr |= SPI_BIT(NCPHA);
1220 
1221     if (!spi->cs_gpiod)
1222         csr |= SPI_BIT(CSAAT);
1223     csr |= SPI_BF(DLYBS, 0);
1224 
1225     word_delay_csr = atmel_word_delay_csr(spi, as);
1226     if (word_delay_csr < 0)
1227         return word_delay_csr;
1228 
1229     /* DLYBCT adds delays between words.  This is useful for slow devices
1230      * that need a bit of time to setup the next transfer.
1231      */
1232     csr |= SPI_BF(DLYBCT, word_delay_csr);
1233 
1234     asd = spi->controller_state;
1235     if (!asd) {
1236         asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
1237         if (!asd)
1238             return -ENOMEM;
1239 
1240         spi->controller_state = asd;
1241     }
1242 
1243     asd->csr = csr;
1244 
1245     dev_dbg(&spi->dev,
1246         "setup: bpw %u mode 0x%x -> csr%d %08x\n",
1247         bits, spi->mode, spi->chip_select, csr);
1248 
1249     if (!atmel_spi_is_v2(as))
1250         spi_writel(as, CSR0 + 4 * chip_select, csr);
1251 
1252     return 0;
1253 }
1254 
1255 static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
1256 {
1257     struct atmel_spi *as = spi_master_get_devdata(spi->master);
1258     /* the core doesn't really pass us enable/disable, but CS HIGH vs CS LOW
1259      * since we already have routines for activate/deactivate translate
1260      * high/low to active/inactive
1261      */
1262     enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
1263 
1264     if (enable) {
1265         cs_activate(as, spi);
1266     } else {
1267         cs_deactivate(as, spi);
1268     }
1269 
1270 }
1271 
1272 static int atmel_spi_one_transfer(struct spi_master *master,
1273                     struct spi_device *spi,
1274                     struct spi_transfer *xfer)
1275 {
1276     struct atmel_spi    *as;
1277     u8          bits;
1278     u32         len;
1279     struct atmel_spi_device *asd;
1280     int         timeout;
1281     int         ret;
1282     unsigned long       dma_timeout;
1283 
1284     as = spi_master_get_devdata(master);
1285 
1286     asd = spi->controller_state;
1287     bits = (asd->csr >> 4) & 0xf;
1288     if (bits != xfer->bits_per_word - 8) {
1289         dev_dbg(&spi->dev,
1290             "you can't yet change bits_per_word in transfers\n");
1291         return -ENOPROTOOPT;
1292     }
1293 
1294     /*
1295      * DMA map early, for performance (empties dcache ASAP) and
1296      * better fault reporting.
1297      */
1298     if ((!master->cur_msg->is_dma_mapped)
1299         && as->use_pdc) {
1300         if (atmel_spi_dma_map_xfer(as, xfer) < 0)
1301             return -ENOMEM;
1302     }
1303 
1304     atmel_spi_set_xfer_speed(as, spi, xfer);
1305 
1306     as->done_status = 0;
1307     as->current_transfer = xfer;
1308     as->current_remaining_bytes = xfer->len;
1309     while (as->current_remaining_bytes) {
1310         reinit_completion(&as->xfer_completion);
1311 
1312         if (as->use_pdc) {
1313             atmel_spi_lock(as);
1314             atmel_spi_pdc_next_xfer(master, xfer);
1315             atmel_spi_unlock(as);
1316         } else if (atmel_spi_use_dma(as, xfer)) {
1317             len = as->current_remaining_bytes;
1318             ret = atmel_spi_next_xfer_dma_submit(master,
1319                                 xfer, &len);
1320             if (ret) {
1321                 dev_err(&spi->dev,
1322                     "unable to use DMA, fallback to PIO\n");
1323                 as->done_status = ret;
1324                 break;
1325             } else {
1326                 as->current_remaining_bytes -= len;
1327                 if (as->current_remaining_bytes < 0)
1328                     as->current_remaining_bytes = 0;
1329             }
1330         } else {
1331             atmel_spi_lock(as);
1332             atmel_spi_next_xfer_pio(master, xfer);
1333             atmel_spi_unlock(as);
1334         }
1335 
1336         dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
1337                               SPI_DMA_TIMEOUT);
1338         if (WARN_ON(dma_timeout == 0)) {
1339             dev_err(&spi->dev, "spi transfer timeout\n");
1340             as->done_status = -EIO;
1341         }
1342 
1343         if (as->done_status)
1344             break;
1345     }
1346 
1347     if (as->done_status) {
1348         if (as->use_pdc) {
1349             dev_warn(master->dev.parent,
1350                 "overrun (%u/%u remaining)\n",
1351                 spi_readl(as, TCR), spi_readl(as, RCR));
1352 
1353             /*
1354              * Clean up DMA registers and make sure the data
1355              * registers are empty.
1356              */
1357             spi_writel(as, RNCR, 0);
1358             spi_writel(as, TNCR, 0);
1359             spi_writel(as, RCR, 0);
1360             spi_writel(as, TCR, 0);
1361             for (timeout = 1000; timeout; timeout--)
1362                 if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
1363                     break;
1364             if (!timeout)
1365                 dev_warn(master->dev.parent,
1366                      "timeout waiting for TXEMPTY");
1367             while (spi_readl(as, SR) & SPI_BIT(RDRF))
1368                 spi_readl(as, RDR);
1369 
1370             /* Clear any overrun happening while cleaning up */
1371             spi_readl(as, SR);
1372 
1373         } else if (atmel_spi_use_dma(as, xfer)) {
1374             atmel_spi_stop_dma(master);
1375         }
1376     }
1377 
1378     if (!master->cur_msg->is_dma_mapped
1379         && as->use_pdc)
1380         atmel_spi_dma_unmap_xfer(master, xfer);
1381 
1382     if (as->use_pdc)
1383         atmel_spi_disable_pdc_transfer(as);
1384 
1385     return as->done_status;
1386 }
1387 
1388 static void atmel_spi_cleanup(struct spi_device *spi)
1389 {
1390     struct atmel_spi_device *asd = spi->controller_state;
1391 
1392     if (!asd)
1393         return;
1394 
1395     spi->controller_state = NULL;
1396     kfree(asd);
1397 }
1398 
1399 static inline unsigned int atmel_get_version(struct atmel_spi *as)
1400 {
1401     return spi_readl(as, VERSION) & 0x00000fff;
1402 }
1403 
1404 static void atmel_get_caps(struct atmel_spi *as)
1405 {
1406     unsigned int version;
1407 
1408     version = atmel_get_version(as);
1409 
1410     as->caps.is_spi2 = version > 0x121;
1411     as->caps.has_wdrbt = version >= 0x210;
1412     as->caps.has_dma_support = version >= 0x212;
1413     as->caps.has_pdc_support = version < 0x212;
1414 }
1415 
1416 static void atmel_spi_init(struct atmel_spi *as)
1417 {
1418     spi_writel(as, CR, SPI_BIT(SWRST));
1419     spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1420 
1421     /* It is recommended to enable FIFOs first thing after reset */
1422     if (as->fifo_size)
1423         spi_writel(as, CR, SPI_BIT(FIFOEN));
1424 
1425     if (as->caps.has_wdrbt) {
1426         spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
1427                 | SPI_BIT(MSTR));
1428     } else {
1429         spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
1430     }
1431 
1432     if (as->use_pdc)
1433         spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
1434     spi_writel(as, CR, SPI_BIT(SPIEN));
1435 }
1436 
1437 static int atmel_spi_probe(struct platform_device *pdev)
1438 {
1439     struct resource     *regs;
1440     int         irq;
1441     struct clk      *clk;
1442     int         ret;
1443     struct spi_master   *master;
1444     struct atmel_spi    *as;
1445 
1446     /* Select default pin state */
1447     pinctrl_pm_select_default_state(&pdev->dev);
1448 
1449     regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1450     if (!regs)
1451         return -ENXIO;
1452 
1453     irq = platform_get_irq(pdev, 0);
1454     if (irq < 0)
1455         return irq;
1456 
1457     clk = devm_clk_get(&pdev->dev, "spi_clk");
1458     if (IS_ERR(clk))
1459         return PTR_ERR(clk);
1460 
1461     /* setup spi core then atmel-specific driver state */
1462     master = spi_alloc_master(&pdev->dev, sizeof(*as));
1463     if (!master)
1464         return -ENOMEM;
1465 
1466     /* the spi->mode bits understood by this driver: */
1467     master->use_gpio_descriptors = true;
1468     master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1469     master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
1470     master->dev.of_node = pdev->dev.of_node;
1471     master->bus_num = pdev->id;
1472     master->num_chipselect = 4;
1473     master->setup = atmel_spi_setup;
1474     master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
1475             SPI_MASTER_GPIO_SS);
1476     master->transfer_one = atmel_spi_one_transfer;
1477     master->set_cs = atmel_spi_set_cs;
1478     master->cleanup = atmel_spi_cleanup;
1479     master->auto_runtime_pm = true;
1480     master->max_dma_len = SPI_MAX_DMA_XFER;
1481     master->can_dma = atmel_spi_can_dma;
1482     platform_set_drvdata(pdev, master);
1483 
1484     as = spi_master_get_devdata(master);
1485 
1486     spin_lock_init(&as->lock);
1487 
1488     as->pdev = pdev;
1489     as->regs = devm_ioremap_resource(&pdev->dev, regs);
1490     if (IS_ERR(as->regs)) {
1491         ret = PTR_ERR(as->regs);
1492         goto out_unmap_regs;
1493     }
1494     as->phybase = regs->start;
1495     as->irq = irq;
1496     as->clk = clk;
1497 
1498     init_completion(&as->xfer_completion);
1499 
1500     atmel_get_caps(as);
1501 
1502     as->use_dma = false;
1503     as->use_pdc = false;
1504     if (as->caps.has_dma_support) {
1505         ret = atmel_spi_configure_dma(master, as);
1506         if (ret == 0) {
1507             as->use_dma = true;
1508         } else if (ret == -EPROBE_DEFER) {
1509             goto out_unmap_regs;
1510         }
1511     } else if (as->caps.has_pdc_support) {
1512         as->use_pdc = true;
1513     }
1514 
1515     if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
1516         as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
1517                               SPI_MAX_DMA_XFER,
1518                               &as->dma_addr_rx_bbuf,
1519                               GFP_KERNEL | GFP_DMA);
1520         if (!as->addr_rx_bbuf) {
1521             as->use_dma = false;
1522         } else {
1523             as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
1524                     SPI_MAX_DMA_XFER,
1525                     &as->dma_addr_tx_bbuf,
1526                     GFP_KERNEL | GFP_DMA);
1527             if (!as->addr_tx_bbuf) {
1528                 as->use_dma = false;
1529                 dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
1530                           as->addr_rx_bbuf,
1531                           as->dma_addr_rx_bbuf);
1532             }
1533         }
1534         if (!as->use_dma)
1535             dev_info(master->dev.parent,
1536                  "  can not allocate dma coherent memory\n");
1537     }
1538 
1539     if (as->caps.has_dma_support && !as->use_dma)
1540         dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
1541 
1542     if (as->use_pdc) {
1543         ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
1544                     0, dev_name(&pdev->dev), master);
1545     } else {
1546         ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
1547                     0, dev_name(&pdev->dev), master);
1548     }
1549     if (ret)
1550         goto out_unmap_regs;
1551 
1552     /* Initialize the hardware */
1553     ret = clk_prepare_enable(clk);
1554     if (ret)
1555         goto out_free_irq;
1556 
1557     as->spi_clk = clk_get_rate(clk);
1558 
1559     as->fifo_size = 0;
1560     if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
1561                   &as->fifo_size)) {
1562         dev_info(&pdev->dev, "Using FIFO (%u data)\n", as->fifo_size);
1563     }
1564 
1565     atmel_spi_init(as);
1566 
1567     pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
1568     pm_runtime_use_autosuspend(&pdev->dev);
1569     pm_runtime_set_active(&pdev->dev);
1570     pm_runtime_enable(&pdev->dev);
1571 
1572     ret = devm_spi_register_master(&pdev->dev, master);
1573     if (ret)
1574         goto out_free_dma;
1575 
1576     /* go! */
1577     dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
1578             atmel_get_version(as), (unsigned long)regs->start,
1579             irq);
1580 
1581     return 0;
1582 
1583 out_free_dma:
1584     pm_runtime_disable(&pdev->dev);
1585     pm_runtime_set_suspended(&pdev->dev);
1586 
1587     if (as->use_dma)
1588         atmel_spi_release_dma(master);
1589 
1590     spi_writel(as, CR, SPI_BIT(SWRST));
1591     spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1592     clk_disable_unprepare(clk);
1593 out_free_irq:
1594 out_unmap_regs:
1595     spi_master_put(master);
1596     return ret;
1597 }
1598 
1599 static int atmel_spi_remove(struct platform_device *pdev)
1600 {
1601     struct spi_master   *master = platform_get_drvdata(pdev);
1602     struct atmel_spi    *as = spi_master_get_devdata(master);
1603 
1604     pm_runtime_get_sync(&pdev->dev);
1605 
1606     /* reset the hardware and block queue progress */
1607     if (as->use_dma) {
1608         atmel_spi_stop_dma(master);
1609         atmel_spi_release_dma(master);
1610         if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
1611             dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
1612                       as->addr_tx_bbuf,
1613                       as->dma_addr_tx_bbuf);
1614             dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
1615                       as->addr_rx_bbuf,
1616                       as->dma_addr_rx_bbuf);
1617         }
1618     }
1619 
1620     spin_lock_irq(&as->lock);
1621     spi_writel(as, CR, SPI_BIT(SWRST));
1622     spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1623     spi_readl(as, SR);
1624     spin_unlock_irq(&as->lock);
1625 
1626     clk_disable_unprepare(as->clk);
1627 
1628     pm_runtime_put_noidle(&pdev->dev);
1629     pm_runtime_disable(&pdev->dev);
1630 
1631     return 0;
1632 }
1633 
1634 static int atmel_spi_runtime_suspend(struct device *dev)
1635 {
1636     struct spi_master *master = dev_get_drvdata(dev);
1637     struct atmel_spi *as = spi_master_get_devdata(master);
1638 
1639     clk_disable_unprepare(as->clk);
1640     pinctrl_pm_select_sleep_state(dev);
1641 
1642     return 0;
1643 }
1644 
1645 static int atmel_spi_runtime_resume(struct device *dev)
1646 {
1647     struct spi_master *master = dev_get_drvdata(dev);
1648     struct atmel_spi *as = spi_master_get_devdata(master);
1649 
1650     pinctrl_pm_select_default_state(dev);
1651 
1652     return clk_prepare_enable(as->clk);
1653 }
1654 
1655 static int atmel_spi_suspend(struct device *dev)
1656 {
1657     struct spi_master *master = dev_get_drvdata(dev);
1658     int ret;
1659 
1660     /* Stop the queue running */
1661     ret = spi_master_suspend(master);
1662     if (ret)
1663         return ret;
1664 
1665     if (!pm_runtime_suspended(dev))
1666         atmel_spi_runtime_suspend(dev);
1667 
1668     return 0;
1669 }
1670 
1671 static int atmel_spi_resume(struct device *dev)
1672 {
1673     struct spi_master *master = dev_get_drvdata(dev);
1674     struct atmel_spi *as = spi_master_get_devdata(master);
1675     int ret;
1676 
1677     ret = clk_prepare_enable(as->clk);
1678     if (ret)
1679         return ret;
1680 
1681     atmel_spi_init(as);
1682 
1683     clk_disable_unprepare(as->clk);
1684 
1685     if (!pm_runtime_suspended(dev)) {
1686         ret = atmel_spi_runtime_resume(dev);
1687         if (ret)
1688             return ret;
1689     }
1690 
1691     /* Start the queue running */
1692     return spi_master_resume(master);
1693 }
1694 
1695 static const struct dev_pm_ops atmel_spi_pm_ops = {
1696     SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
1697     RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
1698                atmel_spi_runtime_resume, NULL)
1699 };
1700 
1701 static const struct of_device_id atmel_spi_dt_ids[] = {
1702     { .compatible = "atmel,at91rm9200-spi" },
1703     { /* sentinel */ }
1704 };
1705 
1706 MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
1707 
1708 static struct platform_driver atmel_spi_driver = {
1709     .driver     = {
1710         .name   = "atmel_spi",
1711         .pm = pm_ptr(&atmel_spi_pm_ops),
1712         .of_match_table = atmel_spi_dt_ids,
1713     },
1714     .probe      = atmel_spi_probe,
1715     .remove     = atmel_spi_remove,
1716 };
1717 module_platform_driver(atmel_spi_driver);
1718 
1719 MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
1720 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1721 MODULE_LICENSE("GPL");
1722 MODULE_ALIAS("platform:atmel_spi");