Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * SPI driver for NVIDIA's Tegra114 SPI Controller.
0004  *
0005  * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
0006  */
0007 
0008 #include <linux/clk.h>
0009 #include <linux/completion.h>
0010 #include <linux/delay.h>
0011 #include <linux/dmaengine.h>
0012 #include <linux/dma-mapping.h>
0013 #include <linux/dmapool.h>
0014 #include <linux/err.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/io.h>
0017 #include <linux/kernel.h>
0018 #include <linux/kthread.h>
0019 #include <linux/module.h>
0020 #include <linux/platform_device.h>
0021 #include <linux/pm_runtime.h>
0022 #include <linux/of.h>
0023 #include <linux/of_device.h>
0024 #include <linux/reset.h>
0025 #include <linux/spi/spi.h>
0026 
0027 #define SPI_COMMAND1                0x000
0028 #define SPI_BIT_LENGTH(x)           (((x) & 0x1f) << 0)
0029 #define SPI_PACKED              (1 << 5)
0030 #define SPI_TX_EN               (1 << 11)
0031 #define SPI_RX_EN               (1 << 12)
0032 #define SPI_BOTH_EN_BYTE            (1 << 13)
0033 #define SPI_BOTH_EN_BIT             (1 << 14)
0034 #define SPI_LSBYTE_FE               (1 << 15)
0035 #define SPI_LSBIT_FE                (1 << 16)
0036 #define SPI_BIDIROE             (1 << 17)
0037 #define SPI_IDLE_SDA_DRIVE_LOW          (0 << 18)
0038 #define SPI_IDLE_SDA_DRIVE_HIGH         (1 << 18)
0039 #define SPI_IDLE_SDA_PULL_LOW           (2 << 18)
0040 #define SPI_IDLE_SDA_PULL_HIGH          (3 << 18)
0041 #define SPI_IDLE_SDA_MASK           (3 << 18)
0042 #define SPI_CS_SW_VAL               (1 << 20)
0043 #define SPI_CS_SW_HW                (1 << 21)
0044 /* SPI_CS_POL_INACTIVE bits are default high */
0045                         /* n from 0 to 3 */
0046 #define SPI_CS_POL_INACTIVE(n)          (1 << (22 + (n)))
0047 #define SPI_CS_POL_INACTIVE_MASK        (0xF << 22)
0048 
0049 #define SPI_CS_SEL_0                (0 << 26)
0050 #define SPI_CS_SEL_1                (1 << 26)
0051 #define SPI_CS_SEL_2                (2 << 26)
0052 #define SPI_CS_SEL_3                (3 << 26)
0053 #define SPI_CS_SEL_MASK             (3 << 26)
0054 #define SPI_CS_SEL(x)               (((x) & 0x3) << 26)
0055 #define SPI_CONTROL_MODE_0          (0 << 28)
0056 #define SPI_CONTROL_MODE_1          (1 << 28)
0057 #define SPI_CONTROL_MODE_2          (2 << 28)
0058 #define SPI_CONTROL_MODE_3          (3 << 28)
0059 #define SPI_CONTROL_MODE_MASK           (3 << 28)
0060 #define SPI_MODE_SEL(x)             (((x) & 0x3) << 28)
0061 #define SPI_M_S                 (1 << 30)
0062 #define SPI_PIO                 (1 << 31)
0063 
0064 #define SPI_COMMAND2                0x004
0065 #define SPI_TX_TAP_DELAY(x)         (((x) & 0x3F) << 6)
0066 #define SPI_RX_TAP_DELAY(x)         (((x) & 0x3F) << 0)
0067 
0068 #define SPI_CS_TIMING1              0x008
0069 #define SPI_SETUP_HOLD(setup, hold)     (((setup) << 4) | (hold))
0070 #define SPI_CS_SETUP_HOLD(reg, cs, val)         \
0071         ((((val) & 0xFFu) << ((cs) * 8)) |  \
0072         ((reg) & ~(0xFFu << ((cs) * 8))))
0073 
0074 #define SPI_CS_TIMING2              0x00C
0075 #define CYCLES_BETWEEN_PACKETS_0(x)     (((x) & 0x1F) << 0)
0076 #define CS_ACTIVE_BETWEEN_PACKETS_0     (1 << 5)
0077 #define CYCLES_BETWEEN_PACKETS_1(x)     (((x) & 0x1F) << 8)
0078 #define CS_ACTIVE_BETWEEN_PACKETS_1     (1 << 13)
0079 #define CYCLES_BETWEEN_PACKETS_2(x)     (((x) & 0x1F) << 16)
0080 #define CS_ACTIVE_BETWEEN_PACKETS_2     (1 << 21)
0081 #define CYCLES_BETWEEN_PACKETS_3(x)     (((x) & 0x1F) << 24)
0082 #define CS_ACTIVE_BETWEEN_PACKETS_3     (1 << 29)
0083 #define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val)     \
0084         (reg = (((val) & 0x1) << ((cs) * 8 + 5)) |  \
0085             ((reg) & ~(1 << ((cs) * 8 + 5))))
0086 #define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val)        \
0087         (reg = (((val) & 0x1F) << ((cs) * 8)) |     \
0088             ((reg) & ~(0x1F << ((cs) * 8))))
0089 #define MAX_SETUP_HOLD_CYCLES           16
0090 #define MAX_INACTIVE_CYCLES         32
0091 
0092 #define SPI_TRANS_STATUS            0x010
0093 #define SPI_BLK_CNT(val)            (((val) >> 0) & 0xFFFF)
0094 #define SPI_SLV_IDLE_COUNT(val)         (((val) >> 16) & 0xFF)
0095 #define SPI_RDY                 (1 << 30)
0096 
0097 #define SPI_FIFO_STATUS             0x014
0098 #define SPI_RX_FIFO_EMPTY           (1 << 0)
0099 #define SPI_RX_FIFO_FULL            (1 << 1)
0100 #define SPI_TX_FIFO_EMPTY           (1 << 2)
0101 #define SPI_TX_FIFO_FULL            (1 << 3)
0102 #define SPI_RX_FIFO_UNF             (1 << 4)
0103 #define SPI_RX_FIFO_OVF             (1 << 5)
0104 #define SPI_TX_FIFO_UNF             (1 << 6)
0105 #define SPI_TX_FIFO_OVF             (1 << 7)
0106 #define SPI_ERR                 (1 << 8)
0107 #define SPI_TX_FIFO_FLUSH           (1 << 14)
0108 #define SPI_RX_FIFO_FLUSH           (1 << 15)
0109 #define SPI_TX_FIFO_EMPTY_COUNT(val)        (((val) >> 16) & 0x7F)
0110 #define SPI_RX_FIFO_FULL_COUNT(val)     (((val) >> 23) & 0x7F)
0111 #define SPI_FRAME_END               (1 << 30)
0112 #define SPI_CS_INACTIVE             (1 << 31)
0113 
0114 #define SPI_FIFO_ERROR              (SPI_RX_FIFO_UNF | \
0115             SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
0116 #define SPI_FIFO_EMPTY          (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
0117 
0118 #define SPI_TX_DATA             0x018
0119 #define SPI_RX_DATA             0x01C
0120 
0121 #define SPI_DMA_CTL             0x020
0122 #define SPI_TX_TRIG_1               (0 << 15)
0123 #define SPI_TX_TRIG_4               (1 << 15)
0124 #define SPI_TX_TRIG_8               (2 << 15)
0125 #define SPI_TX_TRIG_16              (3 << 15)
0126 #define SPI_TX_TRIG_MASK            (3 << 15)
0127 #define SPI_RX_TRIG_1               (0 << 19)
0128 #define SPI_RX_TRIG_4               (1 << 19)
0129 #define SPI_RX_TRIG_8               (2 << 19)
0130 #define SPI_RX_TRIG_16              (3 << 19)
0131 #define SPI_RX_TRIG_MASK            (3 << 19)
0132 #define SPI_IE_TX               (1 << 28)
0133 #define SPI_IE_RX               (1 << 29)
0134 #define SPI_CONT                (1 << 30)
0135 #define SPI_DMA                 (1 << 31)
0136 #define SPI_DMA_EN              SPI_DMA
0137 
0138 #define SPI_DMA_BLK             0x024
0139 #define SPI_DMA_BLK_SET(x)          (((x) & 0xFFFF) << 0)
0140 
0141 #define SPI_TX_FIFO             0x108
0142 #define SPI_RX_FIFO             0x188
0143 #define SPI_INTR_MASK               0x18c
0144 #define SPI_INTR_ALL_MASK           (0x1fUL << 25)
0145 #define MAX_CHIP_SELECT             4
0146 #define SPI_FIFO_DEPTH              64
0147 #define DATA_DIR_TX             (1 << 0)
0148 #define DATA_DIR_RX             (1 << 1)
0149 
0150 #define SPI_DMA_TIMEOUT             (msecs_to_jiffies(1000))
0151 #define DEFAULT_SPI_DMA_BUF_LEN         (16*1024)
0152 #define TX_FIFO_EMPTY_COUNT_MAX         SPI_TX_FIFO_EMPTY_COUNT(0x40)
0153 #define RX_FIFO_FULL_COUNT_ZERO         SPI_RX_FIFO_FULL_COUNT(0)
0154 #define MAX_HOLD_CYCLES             16
0155 #define SPI_DEFAULT_SPEED           25000000
0156 
/* Per-SoC feature flags for the Tegra SPI controller. */
struct tegra_spi_soc_data {
	bool has_intr_mask_reg;	/* controller provides a SPI_INTR_MASK register */
};
0160 
/* Per-client (per spi_device) tuning data, in controller clock taps. */
struct tegra_spi_client_data {
	int tx_clk_tap_delay;	/* TX tap delay for SPI_COMMAND2 */
	int rx_clk_tap_delay;	/* RX tap delay for SPI_COMMAND2 */
};
0165 
/*
 * Driver state for one Tegra SPI controller instance, shared between the
 * transfer path and completion handlers (@lock protects shared access).
 */
struct tegra_spi_data {
	struct device				*dev;
	struct spi_master			*master;
	spinlock_t				lock;

	struct clk				*clk;
	struct reset_control			*rst;
	void __iomem				*base;	/* MMIO register base */
	phys_addr_t				phys;	/* MMIO physical base, used to form DMA FIFO addresses */
	unsigned				irq;
	u32					cur_speed;	/* last clock rate programmed via clk_set_rate() */

	struct spi_device			*cur_spi;
	struct spi_device			*cs_control;	/* set when CS handling is deferred — see setup_transfer_one */
	unsigned				cur_pos;	/* bytes of the current transfer completed */
	unsigned				words_per_32bit; /* packed words per 32-bit FIFO entry (1 if unpacked) */
	unsigned				bytes_per_word;
	unsigned				curr_dma_words;	/* word count of the chunk in flight */
	unsigned				cur_direction;	/* DATA_DIR_TX / DATA_DIR_RX bitmask */

	unsigned				cur_rx_pos;	/* bytes received into rx_buf so far */
	unsigned				cur_tx_pos;	/* bytes consumed from tx_buf so far */

	unsigned				dma_buf_size;
	unsigned				max_buf_size;
	bool					is_curr_dma_xfer; /* current chunk uses DMA rather than PIO */
	bool					use_hw_based_cs;

	struct completion			rx_dma_complete;
	struct completion			tx_dma_complete;

	u32					tx_status;
	u32					rx_status;
	u32					status_reg;
	bool					is_packed;	/* packed FIFO mode for the current transfer */

	/* Shadow copies of controller registers to avoid redundant writes */
	u32					command1_reg;
	u32					dma_control_reg;
	u32					def_command1_reg;
	u32					def_command2_reg;
	u32					spi_cs_timing1;
	u32					spi_cs_timing2;
	u8					last_used_cs;

	struct completion			xfer_completion;
	struct spi_transfer			*curr_xfer;

	/* RX dmaengine channel with its coherent bounce buffer */
	struct dma_chan				*rx_dma_chan;
	u32					*rx_dma_buf;
	dma_addr_t				rx_dma_phys;
	struct dma_async_tx_descriptor		*rx_dma_desc;

	/* TX dmaengine channel with its coherent bounce buffer */
	struct dma_chan				*tx_dma_chan;
	u32					*tx_dma_buf;
	dma_addr_t				tx_dma_phys;
	struct dma_async_tx_descriptor		*tx_dma_desc;
	const struct tegra_spi_soc_data		*soc_data;
};
0223 
0224 static int tegra_spi_runtime_suspend(struct device *dev);
0225 static int tegra_spi_runtime_resume(struct device *dev);
0226 
/* Read the 32-bit controller register at byte offset @reg. */
static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
		unsigned long reg)
{
	return readl(tspi->base + reg);
}
0232 
/*
 * Write the 32-bit controller register at byte offset @reg, then post the
 * write with a dummy read of SPI_COMMAND1.  The read-back is skipped for
 * TX FIFO writes (the FIFO fill hot path).
 */
static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
		u32 val, unsigned long reg)
{
	writel(val, tspi->base + reg);

	/* Read back register to make sure that register writes completed */
	if (reg != SPI_TX_FIFO)
		readl(tspi->base + SPI_COMMAND1);
}
0242 
0243 static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
0244 {
0245     u32 val;
0246 
0247     /* Write 1 to clear status register */
0248     val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
0249     tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
0250 
0251     /* Clear fifo status error if any */
0252     val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
0253     if (val & SPI_ERR)
0254         tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
0255                 SPI_FIFO_STATUS);
0256 }
0257 
0258 static unsigned tegra_spi_calculate_curr_xfer_param(
0259     struct spi_device *spi, struct tegra_spi_data *tspi,
0260     struct spi_transfer *t)
0261 {
0262     unsigned remain_len = t->len - tspi->cur_pos;
0263     unsigned max_word;
0264     unsigned bits_per_word = t->bits_per_word;
0265     unsigned max_len;
0266     unsigned total_fifo_words;
0267 
0268     tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
0269 
0270     if ((bits_per_word == 8 || bits_per_word == 16 ||
0271          bits_per_word == 32) && t->len > 3) {
0272         tspi->is_packed = true;
0273         tspi->words_per_32bit = 32/bits_per_word;
0274     } else {
0275         tspi->is_packed = false;
0276         tspi->words_per_32bit = 1;
0277     }
0278 
0279     if (tspi->is_packed) {
0280         max_len = min(remain_len, tspi->max_buf_size);
0281         tspi->curr_dma_words = max_len/tspi->bytes_per_word;
0282         total_fifo_words = (max_len + 3) / 4;
0283     } else {
0284         max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
0285         max_word = min(max_word, tspi->max_buf_size/4);
0286         tspi->curr_dma_words = max_word;
0287         total_fifo_words = max_word;
0288     }
0289     return total_fifo_words;
0290 }
0291 
/*
 * Fill the TX FIFO by PIO from the client's tx_buf, starting at
 * tspi->cur_tx_pos.  Advances tspi->cur_tx_pos by the bytes consumed and
 * returns the number of SPI words written.
 */
static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
	struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	unsigned nbytes;
	unsigned tx_empty_count;
	u32 fifo_status;
	unsigned max_n_32bit;
	unsigned i, count;
	unsigned int written_words;
	unsigned fifo_words_left;
	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;

	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tspi->is_packed) {
		/* Packed mode: several SPI words share one 32-bit FIFO entry */
		fifo_words_left = tx_empty_count * tspi->words_per_32bit;
		written_words = min(fifo_words_left, tspi->curr_dma_words);
		nbytes = written_words * tspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(nbytes, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			/* Assemble up to 4 bytes, LSB first, per FIFO entry */
			for (i = 0; (i < 4) && nbytes; i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
		}

		tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
	} else {
		/* Unpacked mode: one SPI word per 32-bit FIFO entry */
		unsigned int write_bytes;
		max_n_32bit = min(tspi->curr_dma_words,  tx_empty_count);
		written_words = max_n_32bit;
		nbytes = written_words * tspi->bytes_per_word;
		/* Clamp to the bytes actually remaining in the transfer */
		if (nbytes > t->len - tspi->cur_pos)
			nbytes = t->len - tspi->cur_pos;
		write_bytes = nbytes;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; nbytes && (i < tspi->bytes_per_word);
							i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
		}

		tspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}
0343 
/*
 * Drain the RX FIFO by PIO into the client's rx_buf, starting at
 * tspi->cur_rx_pos.  Advances tspi->cur_rx_pos by the bytes stored and
 * returns the number of SPI words read.
 */
static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	unsigned rx_full_count;
	u32 fifo_status;
	unsigned i, count;
	unsigned int read_words = 0;
	unsigned len;
	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;

	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tspi->is_packed) {
		/* Packed mode: each FIFO entry carries up to 4 data bytes */
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);

			/* Scatter bytes LSB first into the client buffer */
			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i*8) & 0xFF;
		}
		read_words += tspi->curr_dma_words;
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		/*
		 * Unpacked mode: one SPI word per FIFO entry; mask off any
		 * bits beyond the configured word length.
		 */
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		/* Clamp to the bytes remaining in this transfer */
		if (len > t->len - tspi->cur_pos)
			len = t->len - tspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
		read_words += rx_full_count;
		tspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}
0387 
/*
 * Stage the client's tx_buf into the coherent TX DMA bounce buffer.
 * Packed mode copies bytes verbatim; unpacked mode expands each SPI word
 * into its own 32-bit slot.  Advances tspi->cur_tx_pos.
 */
static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	/* Give the CPU ownership of the DMA buffer before writing it */
	dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);

	if (tspi->is_packed) {
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;

		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
		tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		unsigned int i;
		unsigned int count;
		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
		unsigned int write_bytes;

		/* Clamp to the bytes remaining in this transfer */
		if (consume > t->len - tspi->cur_pos)
			consume = t->len - tspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = 0;

			/* Pack one SPI word, LSB first, per 32-bit slot */
			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tspi->tx_dma_buf[count] = x;
		}

		tspi->cur_tx_pos += write_bytes;
	}

	/* Hand the DMA buffer back to the device */
	dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);
}
0426 
/*
 * Unstage received data from the coherent RX DMA bounce buffer into the
 * client's rx_buf.  Packed mode copies bytes verbatim; unpacked mode
 * extracts one SPI word from each 32-bit slot, masked to the configured
 * word length.  Advances tspi->cur_rx_pos.
 */
static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	/* Give the CPU ownership of the DMA buffer before reading it */
	dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tspi->is_packed) {
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;

		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		unsigned int i;
		unsigned int count;
		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
		unsigned int read_bytes;

		/* Clamp to the bytes remaining in this transfer */
		if (consume > t->len - tspi->cur_pos)
			consume = t->len - tspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = tspi->rx_dma_buf[count] & rx_mask;

			/* Scatter the word's bytes, LSB first */
			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}

		tspi->cur_rx_pos += read_bytes;
	}

	/* Hand the DMA buffer back to the device */
	dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);
}
0465 
/* dmaengine completion callback: wake the waiter recorded in @args. */
static void tegra_spi_dma_complete(void *args)
{
	complete((struct completion *)args);
}
0472 
0473 static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
0474 {
0475     reinit_completion(&tspi->tx_dma_complete);
0476     tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
0477                 tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
0478                 DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
0479     if (!tspi->tx_dma_desc) {
0480         dev_err(tspi->dev, "Not able to get desc for Tx\n");
0481         return -EIO;
0482     }
0483 
0484     tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
0485     tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
0486 
0487     dmaengine_submit(tspi->tx_dma_desc);
0488     dma_async_issue_pending(tspi->tx_dma_chan);
0489     return 0;
0490 }
0491 
0492 static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
0493 {
0494     reinit_completion(&tspi->rx_dma_complete);
0495     tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
0496                 tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
0497                 DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
0498     if (!tspi->rx_dma_desc) {
0499         dev_err(tspi->dev, "Not able to get desc for Rx\n");
0500         return -EIO;
0501     }
0502 
0503     tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
0504     tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
0505 
0506     dmaengine_submit(tspi->rx_dma_desc);
0507     dma_async_issue_pending(tspi->rx_dma_chan);
0508     return 0;
0509 }
0510 
0511 static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
0512 {
0513     unsigned long timeout = jiffies + HZ;
0514     u32 status;
0515 
0516     status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
0517     if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
0518         status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
0519         tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
0520         while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
0521             status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
0522             if (time_after(jiffies, timeout)) {
0523                 dev_err(tspi->dev,
0524                     "timeout waiting for fifo flush\n");
0525                 return -EIO;
0526             }
0527 
0528             udelay(1);
0529         }
0530     }
0531 
0532     return 0;
0533 }
0534 
0535 static int tegra_spi_start_dma_based_transfer(
0536         struct tegra_spi_data *tspi, struct spi_transfer *t)
0537 {
0538     u32 val;
0539     unsigned int len;
0540     int ret = 0;
0541     u8 dma_burst;
0542     struct dma_slave_config dma_sconfig = {0};
0543 
0544     val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
0545     tegra_spi_writel(tspi, val, SPI_DMA_BLK);
0546 
0547     if (tspi->is_packed)
0548         len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
0549                     4) * 4;
0550     else
0551         len = tspi->curr_dma_words * 4;
0552 
0553     /* Set attention level based on length of transfer */
0554     if (len & 0xF) {
0555         val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
0556         dma_burst = 1;
0557     } else if (((len) >> 4) & 0x1) {
0558         val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
0559         dma_burst = 4;
0560     } else {
0561         val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
0562         dma_burst = 8;
0563     }
0564 
0565     if (!tspi->soc_data->has_intr_mask_reg) {
0566         if (tspi->cur_direction & DATA_DIR_TX)
0567             val |= SPI_IE_TX;
0568 
0569         if (tspi->cur_direction & DATA_DIR_RX)
0570             val |= SPI_IE_RX;
0571     }
0572 
0573     tegra_spi_writel(tspi, val, SPI_DMA_CTL);
0574     tspi->dma_control_reg = val;
0575 
0576     dma_sconfig.device_fc = true;
0577     if (tspi->cur_direction & DATA_DIR_TX) {
0578         dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
0579         dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
0580         dma_sconfig.dst_maxburst = dma_burst;
0581         ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
0582         if (ret < 0) {
0583             dev_err(tspi->dev,
0584                 "DMA slave config failed: %d\n", ret);
0585             return ret;
0586         }
0587 
0588         tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
0589         ret = tegra_spi_start_tx_dma(tspi, len);
0590         if (ret < 0) {
0591             dev_err(tspi->dev,
0592                 "Starting tx dma failed, err %d\n", ret);
0593             return ret;
0594         }
0595     }
0596 
0597     if (tspi->cur_direction & DATA_DIR_RX) {
0598         dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
0599         dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
0600         dma_sconfig.src_maxburst = dma_burst;
0601         ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
0602         if (ret < 0) {
0603             dev_err(tspi->dev,
0604                 "DMA slave config failed: %d\n", ret);
0605             return ret;
0606         }
0607 
0608         /* Make the dma buffer to read by dma */
0609         dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
0610                 tspi->dma_buf_size, DMA_FROM_DEVICE);
0611 
0612         ret = tegra_spi_start_rx_dma(tspi, len);
0613         if (ret < 0) {
0614             dev_err(tspi->dev,
0615                 "Starting rx dma failed, err %d\n", ret);
0616             if (tspi->cur_direction & DATA_DIR_TX)
0617                 dmaengine_terminate_all(tspi->tx_dma_chan);
0618             return ret;
0619         }
0620     }
0621     tspi->is_curr_dma_xfer = true;
0622     tspi->dma_control_reg = val;
0623 
0624     val |= SPI_DMA_EN;
0625     tegra_spi_writel(tspi, val, SPI_DMA_CTL);
0626     return ret;
0627 }
0628 
0629 static int tegra_spi_start_cpu_based_transfer(
0630         struct tegra_spi_data *tspi, struct spi_transfer *t)
0631 {
0632     u32 val;
0633     unsigned cur_words;
0634 
0635     if (tspi->cur_direction & DATA_DIR_TX)
0636         cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
0637     else
0638         cur_words = tspi->curr_dma_words;
0639 
0640     val = SPI_DMA_BLK_SET(cur_words - 1);
0641     tegra_spi_writel(tspi, val, SPI_DMA_BLK);
0642 
0643     val = 0;
0644     if (tspi->cur_direction & DATA_DIR_TX)
0645         val |= SPI_IE_TX;
0646 
0647     if (tspi->cur_direction & DATA_DIR_RX)
0648         val |= SPI_IE_RX;
0649 
0650     tegra_spi_writel(tspi, val, SPI_DMA_CTL);
0651     tspi->dma_control_reg = val;
0652 
0653     tspi->is_curr_dma_xfer = false;
0654 
0655     val = tspi->command1_reg;
0656     val |= SPI_PIO;
0657     tegra_spi_writel(tspi, val, SPI_COMMAND1);
0658     return 0;
0659 }
0660 
0661 static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
0662             bool dma_to_memory)
0663 {
0664     struct dma_chan *dma_chan;
0665     u32 *dma_buf;
0666     dma_addr_t dma_phys;
0667 
0668     dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
0669     if (IS_ERR(dma_chan))
0670         return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
0671                      "Dma channel is not available\n");
0672 
0673     dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
0674                 &dma_phys, GFP_KERNEL);
0675     if (!dma_buf) {
0676         dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
0677         dma_release_channel(dma_chan);
0678         return -ENOMEM;
0679     }
0680 
0681     if (dma_to_memory) {
0682         tspi->rx_dma_chan = dma_chan;
0683         tspi->rx_dma_buf = dma_buf;
0684         tspi->rx_dma_phys = dma_phys;
0685     } else {
0686         tspi->tx_dma_chan = dma_chan;
0687         tspi->tx_dma_buf = dma_buf;
0688         tspi->tx_dma_phys = dma_phys;
0689     }
0690     return 0;
0691 }
0692 
0693 static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
0694     bool dma_to_memory)
0695 {
0696     u32 *dma_buf;
0697     dma_addr_t dma_phys;
0698     struct dma_chan *dma_chan;
0699 
0700     if (dma_to_memory) {
0701         dma_buf = tspi->rx_dma_buf;
0702         dma_chan = tspi->rx_dma_chan;
0703         dma_phys = tspi->rx_dma_phys;
0704         tspi->rx_dma_chan = NULL;
0705         tspi->rx_dma_buf = NULL;
0706     } else {
0707         dma_buf = tspi->tx_dma_buf;
0708         dma_chan = tspi->tx_dma_chan;
0709         dma_phys = tspi->tx_dma_phys;
0710         tspi->tx_dma_buf = NULL;
0711         tspi->tx_dma_chan = NULL;
0712     }
0713     if (!dma_chan)
0714         return;
0715 
0716     dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
0717     dma_release_channel(dma_chan);
0718 }
0719 
/*
 * Program chip-select setup/hold and inter-packet inactive timing from
 * the spi_device's cs_setup/cs_hold/cs_inactive delays.  All delays must
 * be given in SCK cycles.  Returns 0, or -EINVAL on a bad delay unit.
 */
static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	struct spi_delay *setup = &spi->cs_setup;
	struct spi_delay *hold = &spi->cs_hold;
	struct spi_delay *inactive = &spi->cs_inactive;
	u8 setup_dly, hold_dly, inactive_dly;
	u32 setup_hold;
	u32 spi_cs_timing;
	u32 inactive_cycles;
	u8 cs_state;

	/* Only SCK-cycle units map onto the timing registers */
	if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
	    (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
	    (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
		dev_err(&spi->dev,
			"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
			SPI_DELAY_UNIT_SCK);
		return -EINVAL;
	}

	setup_dly = setup ? setup->value : 0;
	hold_dly = hold ? hold->value : 0;
	inactive_dly = inactive ? inactive->value : 0;

	/* Clamp to hardware limits; the register fields hold (cycles - 1) */
	setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
	hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
	if (setup_dly && hold_dly) {
		setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
		spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
						  spi->chip_select,
						  setup_hold);
		/* Only touch the register when the shadow value changed */
		if (tspi->spi_cs_timing1 != spi_cs_timing) {
			tspi->spi_cs_timing1 = spi_cs_timing;
			tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING1);
		}
	}

	inactive_cycles = min_t(u8, inactive_dly, MAX_INACTIVE_CYCLES);
	if (inactive_cycles)
		inactive_cycles--;
	/* Keep CS active between packets unless inactive cycles were asked for */
	cs_state = inactive_cycles ? 0 : 1;
	spi_cs_timing = tspi->spi_cs_timing2;
	SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
					  cs_state);
	SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
				       inactive_cycles);
	if (tspi->spi_cs_timing2 != spi_cs_timing) {
		tspi->spi_cs_timing2 = spi_cs_timing;
		tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
	}

	return 0;
}
0774 
/*
 * Compute the SPI_COMMAND1 value for a transfer.  For the first transfer
 * of a message this also programs the clock rate, SPI mode, bit order,
 * 3-wire setting, chip-select strategy and tap delays.  The returned
 * command1 value is completed and written by the caller.
 */
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
					struct spi_transfer *t,
					bool is_first_of_msg,
					bool is_single_xfer)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	struct tegra_spi_client_data *cdata = spi->controller_data;
	u32 speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 command1, command2;
	int req_mode;
	u32 tx_tap = 0, rx_tap = 0;

	/* NOTE(review): clk_set_rate() return value is not checked here */
	if (speed != tspi->cur_speed) {
		clk_set_rate(tspi->clk, speed);
		tspi->cur_speed = speed;
	}

	/* Reset per-transfer bookkeeping */
	tspi->cur_spi = spi;
	tspi->cur_pos = 0;
	tspi->cur_rx_pos = 0;
	tspi->cur_tx_pos = 0;
	tspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_spi_clear_status(tspi);

		command1 = tspi->def_command1_reg;
		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);

		/* Translate SPI_MODE_0..3 into the controller mode field */
		command1 &= ~SPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_0)
			command1 |= SPI_CONTROL_MODE_0;
		else if (req_mode == SPI_MODE_1)
			command1 |= SPI_CONTROL_MODE_1;
		else if (req_mode == SPI_MODE_2)
			command1 |= SPI_CONTROL_MODE_2;
		else if (req_mode == SPI_MODE_3)
			command1 |= SPI_CONTROL_MODE_3;

		if (spi->mode & SPI_LSB_FIRST)
			command1 |= SPI_LSBIT_FE;
		else
			command1 &= ~SPI_LSBIT_FE;

		/* 3-wire mode drives the bidirectional data line */
		if (spi->mode & SPI_3WIRE)
			command1 |= SPI_BIDIROE;
		else
			command1 &= ~SPI_BIDIROE;

		/*
		 * If CS handling was carried over from the previous message
		 * (cs_control set), only write command1 now when the target
		 * device changed; otherwise write it unconditionally.
		 */
		if (tspi->cs_control) {
			if (tspi->cs_control != spi)
				tegra_spi_writel(tspi, command1, SPI_COMMAND1);
			tspi->cs_control = NULL;
		} else
			tegra_spi_writel(tspi, command1, SPI_COMMAND1);

		/* GPIO based chip select control */
		if (spi->cs_gpiod)
			gpiod_set_value(spi->cs_gpiod, 1);

		/* Single-transfer messages can let the hardware toggle CS */
		if (is_single_xfer && !(t->cs_change)) {
			tspi->use_hw_based_cs = true;
			command1 &= ~(SPI_CS_SW_HW | SPI_CS_SW_VAL);
		} else {
			tspi->use_hw_based_cs = false;
			command1 |= SPI_CS_SW_HW;
			if (spi->mode & SPI_CS_HIGH)
				command1 |= SPI_CS_SW_VAL;
			else
				command1 &= ~SPI_CS_SW_VAL;
		}

		/* Reprogram tap delays only when the chip select changed */
		if (tspi->last_used_cs != spi->chip_select) {
			if (cdata && cdata->tx_clk_tap_delay)
				tx_tap = cdata->tx_clk_tap_delay;
			if (cdata && cdata->rx_clk_tap_delay)
				rx_tap = cdata->rx_clk_tap_delay;
			command2 = SPI_TX_TAP_DELAY(tx_tap) |
				   SPI_RX_TAP_DELAY(rx_tap);
			if (command2 != tspi->def_command2_reg)
				tegra_spi_writel(tspi, command2, SPI_COMMAND2);
			tspi->last_used_cs = spi->chip_select;
		}

	} else {
		/* Subsequent transfers only update the word length */
		command1 = tspi->command1_reg;
		command1 &= ~SPI_BIT_LENGTH(~0);
		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
	}

	return command1;
}
0869 
static int tegra_spi_start_transfer_one(struct spi_device *spi,
        struct spi_transfer *t, u32 command1)
{
    struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
    unsigned total_fifo_words;
    int ret;

    /*
     * Work out the per-transfer parameters (packed vs unpacked mode,
     * FIFO word count) before programming COMMAND1 below.
     */
    total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);

    /* Enable dual (2-bit) data lines when either direction requests it. */
    if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
        command1 |= SPI_BOTH_EN_BIT;
    else
        command1 &= ~SPI_BOTH_EN_BIT;

    if (tspi->is_packed)
        command1 |= SPI_PACKED;
    else
        command1 &= ~SPI_PACKED;

    /* Re-derive chip-select and direction enables for this transfer. */
    command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
    tspi->cur_direction = 0;
    if (t->rx_buf) {
        command1 |= SPI_RX_EN;
        tspi->cur_direction |= DATA_DIR_RX;
    }
    if (t->tx_buf) {
        command1 |= SPI_TX_EN;
        tspi->cur_direction |= DATA_DIR_TX;
    }
    command1 |= SPI_CS_SEL(spi->chip_select);
    tegra_spi_writel(tspi, command1, SPI_COMMAND1);
    tspi->command1_reg = command1;

    dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
        tspi->def_command1_reg, (unsigned)command1);

    /* Start from clean FIFOs; stale residue would corrupt the transfer. */
    ret = tegra_spi_flush_fifos(tspi);
    if (ret < 0)
        return ret;
    /* Transfers larger than one FIFO depth go via DMA, smaller via PIO. */
    if (total_fifo_words > SPI_FIFO_DEPTH)
        ret = tegra_spi_start_dma_based_transfer(tspi, t);
    else
        ret = tegra_spi_start_cpu_based_transfer(tspi, t);
    return ret;
}
0915 
0916 static struct tegra_spi_client_data
0917     *tegra_spi_parse_cdata_dt(struct spi_device *spi)
0918 {
0919     struct tegra_spi_client_data *cdata;
0920     struct device_node *slave_np;
0921 
0922     slave_np = spi->dev.of_node;
0923     if (!slave_np) {
0924         dev_dbg(&spi->dev, "device node not found\n");
0925         return NULL;
0926     }
0927 
0928     cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
0929     if (!cdata)
0930         return NULL;
0931 
0932     of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
0933                  &cdata->tx_clk_tap_delay);
0934     of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
0935                  &cdata->rx_clk_tap_delay);
0936     return cdata;
0937 }
0938 
0939 static void tegra_spi_cleanup(struct spi_device *spi)
0940 {
0941     struct tegra_spi_client_data *cdata = spi->controller_data;
0942 
0943     spi->controller_data = NULL;
0944     if (spi->dev.of_node)
0945         kfree(cdata);
0946 }
0947 
static int tegra_spi_setup(struct spi_device *spi)
{
    struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
    struct tegra_spi_client_data *cdata = spi->controller_data;
    u32 val;
    unsigned long flags;
    int ret;

    dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
        spi->bits_per_word,
        spi->mode & SPI_CPOL ? "" : "~",
        spi->mode & SPI_CPHA ? "" : "~",
        spi->max_speed_hz);

    /* Lazily parse per-client tap delays from DT on first setup. */
    if (!cdata) {
        cdata = tegra_spi_parse_cdata_dt(spi);
        spi->controller_data = cdata;
    }

    /* Controller must be clocked before touching its registers. */
    ret = pm_runtime_resume_and_get(tspi->dev);
    if (ret < 0) {
        dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
        if (cdata)
            tegra_spi_cleanup(spi);
        return ret;
    }

    if (tspi->soc_data->has_intr_mask_reg) {
        /* Unmask all SPI interrupts on SoCs with SPI_INTR_MASK. */
        val = tegra_spi_readl(tspi, SPI_INTR_MASK);
        val &= ~SPI_INTR_ALL_MASK;
        tegra_spi_writel(tspi, val, SPI_INTR_MASK);
    }

    /* Serialize against the transfer path updating def_command1_reg. */
    spin_lock_irqsave(&tspi->lock, flags);
    /* GPIO based chip select control */
    if (spi->cs_gpiod)
        gpiod_set_value(spi->cs_gpiod, 0);

    /* Record the inactive (idle) CS polarity for this chip select. */
    val = tspi->def_command1_reg;
    if (spi->mode & SPI_CS_HIGH)
        val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
    else
        val |= SPI_CS_POL_INACTIVE(spi->chip_select);
    tspi->def_command1_reg = val;
    tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
    spin_unlock_irqrestore(&tspi->lock, flags);

    pm_runtime_put(tspi->dev);
    return 0;
}
0998 
0999 static void tegra_spi_transfer_end(struct spi_device *spi)
1000 {
1001     struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
1002     int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
1003 
1004     /* GPIO based chip select control */
1005     if (spi->cs_gpiod)
1006         gpiod_set_value(spi->cs_gpiod, 0);
1007 
1008     if (!tspi->use_hw_based_cs) {
1009         if (cs_val)
1010             tspi->command1_reg |= SPI_CS_SW_VAL;
1011         else
1012             tspi->command1_reg &= ~SPI_CS_SW_VAL;
1013         tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
1014     }
1015 
1016     tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1017 }
1018 
/*
 * Dump the main controller registers at debug level for diagnosing
 * transfer failures. The reads occur inside dev_dbg() argument lists,
 * so they only happen when debug output is actually enabled.
 */
static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
{
    dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
    dev_dbg(tspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
        tegra_spi_readl(tspi, SPI_COMMAND1),
        tegra_spi_readl(tspi, SPI_COMMAND2));
    dev_dbg(tspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
        tegra_spi_readl(tspi, SPI_DMA_CTL),
        tegra_spi_readl(tspi, SPI_DMA_BLK));
    dev_dbg(tspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
        tegra_spi_readl(tspi, SPI_TRANS_STATUS),
        tegra_spi_readl(tspi, SPI_FIFO_STATUS));
}
1032 
static int tegra_spi_transfer_one_message(struct spi_master *master,
            struct spi_message *msg)
{
    bool is_first_msg = true;
    struct tegra_spi_data *tspi = spi_master_get_devdata(master);
    struct spi_transfer *xfer;
    struct spi_device *spi = msg->spi;
    int ret;
    bool skip = false;
    int single_xfer;

    msg->status = 0;
    msg->actual_length = 0;

    /* A single-transfer message may use hardware chip-select control. */
    single_xfer = list_is_singular(&msg->transfers);
    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        u32 cmd1;

        reinit_completion(&tspi->xfer_completion);

        cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg,
                            single_xfer);

        /* Zero-length transfers only run the CS/delay handling below. */
        if (!xfer->len) {
            ret = 0;
            skip = true;
            goto complete_xfer;
        }

        ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
        if (ret < 0) {
            dev_err(tspi->dev,
                "spi can not start transfer, err %d\n", ret);
            goto complete_xfer;
        }

        is_first_msg = false;
        /* Wait for the threaded ISR to signal completion. */
        ret = wait_for_completion_timeout(&tspi->xfer_completion,
                        SPI_DMA_TIMEOUT);
        if (WARN_ON(ret == 0)) {
            dev_err(tspi->dev, "spi transfer timeout\n");
            /* Tear down any in-flight DMA before resetting. */
            if (tspi->is_curr_dma_xfer &&
                (tspi->cur_direction & DATA_DIR_TX))
                dmaengine_terminate_all(tspi->tx_dma_chan);
            if (tspi->is_curr_dma_xfer &&
                (tspi->cur_direction & DATA_DIR_RX))
                dmaengine_terminate_all(tspi->rx_dma_chan);
            ret = -EIO;
            tegra_spi_dump_regs(tspi);
            tegra_spi_flush_fifos(tspi);
            reset_control_assert(tspi->rst);
            udelay(2);
            reset_control_deassert(tspi->rst);
            /* Reset clobbered COMMAND2; force reprogramming next use. */
            tspi->last_used_cs = master->num_chipselect + 1;
            goto complete_xfer;
        }

        if (tspi->tx_status ||  tspi->rx_status) {
            dev_err(tspi->dev, "Error in Transfer\n");
            ret = -EIO;
            tegra_spi_dump_regs(tspi);
            goto complete_xfer;
        }
        msg->actual_length += xfer->len;

complete_xfer:
        if (ret < 0 || skip) {
            /* Error or empty transfer: deassert CS and bail out. */
            tegra_spi_transfer_end(spi);
            spi_transfer_delay_exec(xfer);
            goto exit;
        } else if (list_is_last(&xfer->transfer_list,
                    &msg->transfers)) {
            /*
             * Last transfer: cs_change keeps CS asserted into the
             * next message, otherwise deassert it now.
             */
            if (xfer->cs_change)
                tspi->cs_control = spi;
            else {
                tegra_spi_transfer_end(spi);
                spi_transfer_delay_exec(xfer);
            }
        } else if (xfer->cs_change) {
            /* Mid-message cs_change: toggle CS between transfers. */
            tegra_spi_transfer_end(spi);
            spi_transfer_delay_exec(xfer);
        }

    }
    ret = 0;
exit:
    msg->status = ret;
    spi_finalize_current_message(master);
    return ret;
}
1123 
/* Threaded-IRQ continuation for PIO transfers: drain/refill the FIFOs. */
static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
{
    struct spi_transfer *t = tspi->curr_xfer;
    unsigned long flags;

    spin_lock_irqsave(&tspi->lock, flags);
    if (tspi->tx_status ||  tspi->rx_status) {
        /* FIFO over/underflow: report, flush, and reset controller. */
        dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
            tspi->status_reg);
        dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
            tspi->command1_reg, tspi->dma_control_reg);
        tegra_spi_dump_regs(tspi);
        tegra_spi_flush_fifos(tspi);
        complete(&tspi->xfer_completion);
        /* Reset outside the lock: udelay() under spinlock is avoided. */
        spin_unlock_irqrestore(&tspi->lock, flags);
        reset_control_assert(tspi->rst);
        udelay(2);
        reset_control_deassert(tspi->rst);
        return IRQ_HANDLED;
    }

    /* Drain whatever the controller has received so far. */
    if (tspi->cur_direction & DATA_DIR_RX)
        tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);

    if (tspi->cur_direction & DATA_DIR_TX)
        tspi->cur_pos = tspi->cur_tx_pos;
    else
        tspi->cur_pos = tspi->cur_rx_pos;

    /* Whole transfer done: wake up transfer_one_message(). */
    if (tspi->cur_pos == t->len) {
        complete(&tspi->xfer_completion);
        goto exit;
    }

    /* More data pending: kick off the next PIO chunk. */
    tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
    tegra_spi_start_cpu_based_transfer(tspi, t);
exit:
    spin_unlock_irqrestore(&tspi->lock, flags);
    return IRQ_HANDLED;
}
1164 
/* Threaded-IRQ continuation for DMA transfers: reap DMA and continue. */
static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
{
    struct spi_transfer *t = tspi->curr_xfer;
    long wait_status;
    int err = 0;
    unsigned total_fifo_words;
    unsigned long flags;

    /* Abort dmas if any error */
    if (tspi->cur_direction & DATA_DIR_TX) {
        if (tspi->tx_status) {
            dmaengine_terminate_all(tspi->tx_dma_chan);
            err += 1;
        } else {
            /* No FIFO error: wait for the TX DMA completion callback. */
            wait_status = wait_for_completion_interruptible_timeout(
                &tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
            if (wait_status <= 0) {
                dmaengine_terminate_all(tspi->tx_dma_chan);
                dev_err(tspi->dev, "TxDma Xfer failed\n");
                err += 1;
            }
        }
    }

    if (tspi->cur_direction & DATA_DIR_RX) {
        if (tspi->rx_status) {
            dmaengine_terminate_all(tspi->rx_dma_chan);
            err += 2;
        } else {
            /* No FIFO error: wait for the RX DMA completion callback. */
            wait_status = wait_for_completion_interruptible_timeout(
                &tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
            if (wait_status <= 0) {
                dmaengine_terminate_all(tspi->rx_dma_chan);
                dev_err(tspi->dev, "RxDma Xfer failed\n");
                err += 2;
            }
        }
    }

    spin_lock_irqsave(&tspi->lock, flags);
    if (err) {
        /* Report, flush FIFOs and reset the controller on failure. */
        dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
            tspi->status_reg);
        dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
            tspi->command1_reg, tspi->dma_control_reg);
        tegra_spi_dump_regs(tspi);
        tegra_spi_flush_fifos(tspi);
        complete(&tspi->xfer_completion);
        /* Reset outside the lock: udelay() under spinlock is avoided. */
        spin_unlock_irqrestore(&tspi->lock, flags);
        reset_control_assert(tspi->rst);
        udelay(2);
        reset_control_deassert(tspi->rst);
        return IRQ_HANDLED;
    }

    /* Copy DMA bounce-buffer contents back to the client RX buffer. */
    if (tspi->cur_direction & DATA_DIR_RX)
        tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);

    if (tspi->cur_direction & DATA_DIR_TX)
        tspi->cur_pos = tspi->cur_tx_pos;
    else
        tspi->cur_pos = tspi->cur_rx_pos;

    if (tspi->cur_pos == t->len) {
        complete(&tspi->xfer_completion);
        goto exit;
    }

    /* Continue transfer in current message */
    total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
                            tspi, t);
    if (total_fifo_words > SPI_FIFO_DEPTH)
        err = tegra_spi_start_dma_based_transfer(tspi, t);
    else
        err = tegra_spi_start_cpu_based_transfer(tspi, t);

exit:
    spin_unlock_irqrestore(&tspi->lock, flags);
    return IRQ_HANDLED;
}
1245 
1246 static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
1247 {
1248     struct tegra_spi_data *tspi = context_data;
1249 
1250     if (!tspi->is_curr_dma_xfer)
1251         return handle_cpu_based_xfer(tspi);
1252     return handle_dma_based_xfer(tspi);
1253 }
1254 
/*
 * Hard-IRQ half: latch FIFO status and the per-direction error bits
 * before clearing them, then defer processing to the IRQ thread.
 */
static irqreturn_t tegra_spi_isr(int irq, void *context_data)
{
    struct tegra_spi_data *tspi = context_data;

    tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
    if (tspi->cur_direction & DATA_DIR_TX)
        tspi->tx_status = tspi->status_reg &
                    (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);

    if (tspi->cur_direction & DATA_DIR_RX)
        tspi->rx_status = tspi->status_reg &
                    (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
    /* Ack the status now that it has been captured above. */
    tegra_spi_clear_status(tspi);

    return IRQ_WAKE_THREAD;
}
1271 
/* Tegra114 has no SPI_INTR_MASK register (see tegra_spi_setup()). */
static struct tegra_spi_soc_data tegra114_spi_soc_data = {
    .has_intr_mask_reg = false,
};

/* Tegra124 likewise lacks SPI_INTR_MASK. */
static struct tegra_spi_soc_data tegra124_spi_soc_data = {
    .has_intr_mask_reg = false,
};

/* Tegra210 provides SPI_INTR_MASK for interrupt masking. */
static struct tegra_spi_soc_data tegra210_spi_soc_data = {
    .has_intr_mask_reg = true,
};
1283 
/* DT match table; per-entry .data selects the SoC feature set above. */
static const struct of_device_id tegra_spi_of_match[] = {
    {
        .compatible = "nvidia,tegra114-spi",
        .data       = &tegra114_spi_soc_data,
    }, {
        .compatible = "nvidia,tegra124-spi",
        .data       = &tegra124_spi_soc_data,
    }, {
        .compatible = "nvidia,tegra210-spi",
        .data       = &tegra210_spi_soc_data,
    },
    {}
};
MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
1298 
static int tegra_spi_probe(struct platform_device *pdev)
{
    struct spi_master   *master;
    struct tegra_spi_data   *tspi;
    struct resource     *r;
    int ret, spi_irq;
    int bus_num;

    master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
    if (!master) {
        dev_err(&pdev->dev, "master allocation failed\n");
        return -ENOMEM;
    }
    platform_set_drvdata(pdev, master);
    tspi = spi_master_get_devdata(master);

    /* Fall back to 25 MHz when DT does not give spi-max-frequency. */
    if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
                 &master->max_speed_hz))
        master->max_speed_hz = 25000000; /* 25MHz */

    /* the spi->mode bits understood by this driver: */
    master->use_gpio_descriptors = true;
    master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
                SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
    master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
    master->setup = tegra_spi_setup;
    master->cleanup = tegra_spi_cleanup;
    master->transfer_one_message = tegra_spi_transfer_one_message;
    master->set_cs_timing = tegra_spi_set_hw_cs_timing;
    master->num_chipselect = MAX_CHIP_SELECT;
    master->auto_runtime_pm = true;
    /* Honor a DT "spi" alias for a stable bus number, if present. */
    bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
    if (bus_num >= 0)
        master->bus_num = bus_num;

    tspi->master = master;
    tspi->dev = &pdev->dev;
    spin_lock_init(&tspi->lock);

    tspi->soc_data = of_device_get_match_data(&pdev->dev);
    if (!tspi->soc_data) {
        dev_err(&pdev->dev, "unsupported tegra\n");
        ret = -ENODEV;
        goto exit_free_master;
    }

    r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    tspi->base = devm_ioremap_resource(&pdev->dev, r);
    if (IS_ERR(tspi->base)) {
        ret = PTR_ERR(tspi->base);
        goto exit_free_master;
    }
    /* Physical base address is kept for DMA slave configuration. */
    tspi->phys = r->start;

    spi_irq = platform_get_irq(pdev, 0);
    if (spi_irq < 0) {
        ret = spi_irq;
        goto exit_free_master;
    }
    tspi->irq = spi_irq;

    tspi->clk = devm_clk_get(&pdev->dev, "spi");
    if (IS_ERR(tspi->clk)) {
        dev_err(&pdev->dev, "can not get clock\n");
        ret = PTR_ERR(tspi->clk);
        goto exit_free_master;
    }

    tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
    if (IS_ERR(tspi->rst)) {
        dev_err(&pdev->dev, "can not get reset\n");
        ret = PTR_ERR(tspi->rst);
        goto exit_free_master;
    }

    tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
    tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;

    /* Init RX DMA first, then TX; errors unwind in reverse order. */
    ret = tegra_spi_init_dma_param(tspi, true);
    if (ret < 0)
        goto exit_free_master;
    ret = tegra_spi_init_dma_param(tspi, false);
    if (ret < 0)
        goto exit_rx_dma_free;
    tspi->max_buf_size = tspi->dma_buf_size;
    init_completion(&tspi->tx_dma_complete);
    init_completion(&tspi->rx_dma_complete);

    init_completion(&tspi->xfer_completion);

    pm_runtime_enable(&pdev->dev);
    /* If runtime PM is compiled out, bring the clock up manually. */
    if (!pm_runtime_enabled(&pdev->dev)) {
        ret = tegra_spi_runtime_resume(&pdev->dev);
        if (ret)
            goto exit_pm_disable;
    }

    ret = pm_runtime_resume_and_get(&pdev->dev);
    if (ret < 0) {
        dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
        goto exit_pm_disable;
    }

    /* Reset the controller into a known state before first use. */
    reset_control_assert(tspi->rst);
    udelay(2);
    reset_control_deassert(tspi->rst);
    tspi->def_command1_reg  = SPI_M_S;
    tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
    tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
    tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
    tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
    /* Out-of-range CS forces COMMAND2 programming on first transfer. */
    tspi->last_used_cs = master->num_chipselect + 1;
    pm_runtime_put(&pdev->dev);
    ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
                   tegra_spi_isr_thread, IRQF_ONESHOT,
                   dev_name(&pdev->dev), tspi);
    if (ret < 0) {
        dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
            tspi->irq);
        goto exit_pm_disable;
    }

    master->dev.of_node = pdev->dev.of_node;
    ret = devm_spi_register_master(&pdev->dev, master);
    if (ret < 0) {
        dev_err(&pdev->dev, "can not register to master err %d\n", ret);
        goto exit_free_irq;
    }
    return ret;

exit_free_irq:
    free_irq(spi_irq, tspi);
exit_pm_disable:
    pm_runtime_disable(&pdev->dev);
    if (!pm_runtime_status_suspended(&pdev->dev))
        tegra_spi_runtime_suspend(&pdev->dev);
    tegra_spi_deinit_dma_param(tspi, false);
exit_rx_dma_free:
    tegra_spi_deinit_dma_param(tspi, true);
exit_free_master:
    spi_master_put(master);
    return ret;
}
1442 
static int tegra_spi_remove(struct platform_device *pdev)
{
    struct spi_master *master = platform_get_drvdata(pdev);
    struct tegra_spi_data   *tspi = spi_master_get_devdata(master);

    /* Stop the ISR first so it cannot race with the teardown below. */
    free_irq(tspi->irq, tspi);

    if (tspi->tx_dma_chan)
        tegra_spi_deinit_dma_param(tspi, false);

    if (tspi->rx_dma_chan)
        tegra_spi_deinit_dma_param(tspi, true);

    pm_runtime_disable(&pdev->dev);
    /* If runtime PM never suspended the device, gate the clock now. */
    if (!pm_runtime_status_suspended(&pdev->dev))
        tegra_spi_runtime_suspend(&pdev->dev);

    return 0;
}
1462 
1463 #ifdef CONFIG_PM_SLEEP
/* System sleep: quiesce the SPI core; state is restored in resume. */
static int tegra_spi_suspend(struct device *dev)
{
    return spi_master_suspend(dev_get_drvdata(dev));
}
1470 
static int tegra_spi_resume(struct device *dev)
{
    struct spi_master *master = dev_get_drvdata(dev);
    struct tegra_spi_data *tspi = spi_master_get_devdata(master);
    int ret;

    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0) {
        dev_err(dev, "pm runtime failed, e = %d\n", ret);
        return ret;
    }
    /* Restore command registers lost while the controller was off. */
    tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
    tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
    /* Invalidate CS cache so COMMAND2 is reprogrammed on next use. */
    tspi->last_used_cs = master->num_chipselect + 1;
    pm_runtime_put(dev);

    return spi_master_resume(master);
}
1489 #endif
1490 
static int tegra_spi_runtime_suspend(struct device *dev)
{
    struct spi_master *master = dev_get_drvdata(dev);
    struct tegra_spi_data *tspi = spi_master_get_devdata(master);

    /* Flush all write which are in PPSB queue by reading back */
    tegra_spi_readl(tspi, SPI_COMMAND1);

    /* Gate the controller clock until the next runtime resume. */
    clk_disable_unprepare(tspi->clk);
    return 0;
}
1502 
1503 static int tegra_spi_runtime_resume(struct device *dev)
1504 {
1505     struct spi_master *master = dev_get_drvdata(dev);
1506     struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1507     int ret;
1508 
1509     ret = clk_prepare_enable(tspi->clk);
1510     if (ret < 0) {
1511         dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
1512         return ret;
1513     }
1514     return 0;
1515 }
1516 
/* Runtime PM gates the SPI clock; sleep ops save/restore register state. */
static const struct dev_pm_ops tegra_spi_pm_ops = {
    SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
        tegra_spi_runtime_resume, NULL)
    SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
};
static struct platform_driver tegra_spi_driver = {
    .driver = {
        .name       = "spi-tegra114",
        .pm     = &tegra_spi_pm_ops,
        .of_match_table = tegra_spi_of_match,
    },
    .probe =    tegra_spi_probe,
    .remove =   tegra_spi_remove,
};
module_platform_driver(tegra_spi_driver);

MODULE_ALIAS("platform:spi-tegra114");
MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");