0001 // SPDX-License-Identifier: GPL-2.0-only
0002 //
0003 // Copyright (C) 2020 NVIDIA CORPORATION.
0004 
0005 #include <linux/clk.h>
0006 #include <linux/completion.h>
0007 #include <linux/delay.h>
0008 #include <linux/dmaengine.h>
0009 #include <linux/dma-mapping.h>
0010 #include <linux/dmapool.h>
0011 #include <linux/err.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/io.h>
0014 #include <linux/iopoll.h>
0015 #include <linux/kernel.h>
0016 #include <linux/kthread.h>
0017 #include <linux/module.h>
0018 #include <linux/platform_device.h>
0019 #include <linux/pm_runtime.h>
0020 #include <linux/of.h>
0021 #include <linux/of_device.h>
0022 #include <linux/reset.h>
0023 #include <linux/spi/spi.h>
0024 #include <linux/acpi.h>
0025 #include <linux/property.h>
0026 
0027 #define QSPI_COMMAND1               0x000
0028 #define QSPI_BIT_LENGTH(x)          (((x) & 0x1f) << 0)
0029 #define QSPI_PACKED             BIT(5)
0030 #define QSPI_INTERFACE_WIDTH_MASK       (0x03 << 7)
0031 #define QSPI_INTERFACE_WIDTH(x)         (((x) & 0x03) << 7)
0032 #define QSPI_INTERFACE_WIDTH_SINGLE     QSPI_INTERFACE_WIDTH(0)
0033 #define QSPI_INTERFACE_WIDTH_DUAL       QSPI_INTERFACE_WIDTH(1)
0034 #define QSPI_INTERFACE_WIDTH_QUAD       QSPI_INTERFACE_WIDTH(2)
0035 #define QSPI_SDR_DDR_SEL            BIT(9)
0036 #define QSPI_TX_EN              BIT(11)
0037 #define QSPI_RX_EN              BIT(12)
0038 #define QSPI_CS_SW_VAL              BIT(20)
0039 #define QSPI_CS_SW_HW               BIT(21)
0040 
0041 #define QSPI_CS_POL_INACTIVE(n)         (1 << (22 + (n)))
0042 #define QSPI_CS_POL_INACTIVE_MASK       (0xF << 22)
0043 #define QSPI_CS_SEL_0               (0 << 26)
0044 #define QSPI_CS_SEL_1               (1 << 26)
0045 #define QSPI_CS_SEL_2               (2 << 26)
0046 #define QSPI_CS_SEL_3               (3 << 26)
0047 #define QSPI_CS_SEL_MASK            (3 << 26)
0048 #define QSPI_CS_SEL(x)              (((x) & 0x3) << 26)
0049 
0050 #define QSPI_CONTROL_MODE_0         (0 << 28)
0051 #define QSPI_CONTROL_MODE_3         (3 << 28)
0052 #define QSPI_CONTROL_MODE_MASK          (3 << 28)
0053 #define QSPI_M_S                BIT(30)
0054 #define QSPI_PIO                BIT(31)
0055 
0056 #define QSPI_COMMAND2               0x004
0057 #define QSPI_TX_TAP_DELAY(x)            (((x) & 0x3f) << 10)
0058 #define QSPI_RX_TAP_DELAY(x)            (((x) & 0xff) << 0)
0059 
0060 #define QSPI_CS_TIMING1             0x008
0061 #define QSPI_SETUP_HOLD(setup, hold)        (((setup) << 4) | (hold))
0062 
0063 #define QSPI_CS_TIMING2             0x00c
0064 #define CYCLES_BETWEEN_PACKETS_0(x)     (((x) & 0x1f) << 0)
0065 #define CS_ACTIVE_BETWEEN_PACKETS_0     BIT(5)
0066 
0067 #define QSPI_TRANS_STATUS           0x010
0068 #define QSPI_BLK_CNT(val)           (((val) >> 0) & 0xffff)
0069 #define QSPI_RDY                BIT(30)
0070 
0071 #define QSPI_FIFO_STATUS            0x014
0072 #define QSPI_RX_FIFO_EMPTY          BIT(0)
0073 #define QSPI_RX_FIFO_FULL           BIT(1)
0074 #define QSPI_TX_FIFO_EMPTY          BIT(2)
0075 #define QSPI_TX_FIFO_FULL           BIT(3)
0076 #define QSPI_RX_FIFO_UNF            BIT(4)
0077 #define QSPI_RX_FIFO_OVF            BIT(5)
0078 #define QSPI_TX_FIFO_UNF            BIT(6)
0079 #define QSPI_TX_FIFO_OVF            BIT(7)
0080 #define QSPI_ERR                BIT(8)
0081 #define QSPI_TX_FIFO_FLUSH          BIT(14)
0082 #define QSPI_RX_FIFO_FLUSH          BIT(15)
0083 #define QSPI_TX_FIFO_EMPTY_COUNT(val)       (((val) >> 16) & 0x7f)
0084 #define QSPI_RX_FIFO_FULL_COUNT(val)        (((val) >> 23) & 0x7f)
0085 
0086 #define QSPI_FIFO_ERROR             (QSPI_RX_FIFO_UNF | \
0087                          QSPI_RX_FIFO_OVF | \
0088                          QSPI_TX_FIFO_UNF | \
0089                          QSPI_TX_FIFO_OVF)
0090 #define QSPI_FIFO_EMPTY             (QSPI_RX_FIFO_EMPTY | \
0091                          QSPI_TX_FIFO_EMPTY)
0092 
0093 #define QSPI_TX_DATA                0x018
0094 #define QSPI_RX_DATA                0x01c
0095 
0096 #define QSPI_DMA_CTL                0x020
0097 #define QSPI_TX_TRIG(n)             (((n) & 0x3) << 15)
0098 #define QSPI_TX_TRIG_1              QSPI_TX_TRIG(0)
0099 #define QSPI_TX_TRIG_4              QSPI_TX_TRIG(1)
0100 #define QSPI_TX_TRIG_8              QSPI_TX_TRIG(2)
0101 #define QSPI_TX_TRIG_16             QSPI_TX_TRIG(3)
0102 
0103 #define QSPI_RX_TRIG(n)             (((n) & 0x3) << 19)
0104 #define QSPI_RX_TRIG_1              QSPI_RX_TRIG(0)
0105 #define QSPI_RX_TRIG_4              QSPI_RX_TRIG(1)
0106 #define QSPI_RX_TRIG_8              QSPI_RX_TRIG(2)
0107 #define QSPI_RX_TRIG_16             QSPI_RX_TRIG(3)
0108 
0109 #define QSPI_DMA_EN             BIT(31)
0110 
0111 #define QSPI_DMA_BLK                0x024
0112 #define QSPI_DMA_BLK_SET(x)         (((x) & 0xffff) << 0)
0113 
0114 #define QSPI_TX_FIFO                0x108
0115 #define QSPI_RX_FIFO                0x188
0116 
0117 #define QSPI_FIFO_DEPTH             64
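/*
 * Editor's note: 64 words x 4 bytes = 256 bytes; QSPI_FIFO_DEPTH << 2 is
 * used below as the maximum buffer size for PIO transfers.
 */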
0118 
0119 #define QSPI_INTR_MASK              0x18c
0120 #define QSPI_INTR_RX_FIFO_UNF_MASK      BIT(25)
0121 #define QSPI_INTR_RX_FIFO_OVF_MASK      BIT(26)
0122 #define QSPI_INTR_TX_FIFO_UNF_MASK      BIT(27)
0123 #define QSPI_INTR_TX_FIFO_OVF_MASK      BIT(28)
0124 #define QSPI_INTR_RDY_MASK          BIT(29)
0125 #define QSPI_INTR_RX_TX_FIFO_ERR        (QSPI_INTR_RX_FIFO_UNF_MASK | \
0126                          QSPI_INTR_RX_FIFO_OVF_MASK | \
0127                          QSPI_INTR_TX_FIFO_UNF_MASK | \
0128                          QSPI_INTR_TX_FIFO_OVF_MASK)
0129 
0130 #define QSPI_MISC_REG               0x194
0131 #define QSPI_NUM_DUMMY_CYCLE(x)         (((x) & 0xff) << 0)
0132 #define QSPI_DUMMY_CYCLES_MAX           0xff
0133 
0134 #define QSPI_CMB_SEQ_CMD            0x19c
0135 #define QSPI_COMMAND_VALUE_SET(x)       (((x) & 0xFF) << 0)
0136 
0137 #define QSPI_CMB_SEQ_CMD_CFG            0x1a0
0138 #define QSPI_COMMAND_X1_X2_X4(x)        (((x) & 0x3) << 13)
0139 #define QSPI_COMMAND_X1_X2_X4_MASK      (0x03 << 13)
0140 #define QSPI_COMMAND_SDR_DDR            BIT(12)
0141 #define QSPI_COMMAND_SIZE_SET(x)        (((x) & 0xFF) << 0)
0142 
0143 #define QSPI_GLOBAL_CONFIG          0x1a4
0144 #define QSPI_CMB_SEQ_EN             BIT(0)
0145 
0146 #define QSPI_CMB_SEQ_ADDR           0x1a8
0147 #define QSPI_ADDRESS_VALUE_SET(x)       (((x) & 0xFFFF) << 0)
0148 
0149 #define QSPI_CMB_SEQ_ADDR_CFG           0x1ac
0150 #define QSPI_ADDRESS_X1_X2_X4(x)        (((x) & 0x3) << 13)
0151 #define QSPI_ADDRESS_X1_X2_X4_MASK      (0x03 << 13)
0152 #define QSPI_ADDRESS_SDR_DDR            BIT(12)
0153 #define QSPI_ADDRESS_SIZE_SET(x)        (((x) & 0xFF) << 0)
0154 
0155 #define DATA_DIR_TX             BIT(0)
0156 #define DATA_DIR_RX             BIT(1)
0157 
0158 #define QSPI_DMA_TIMEOUT            (msecs_to_jiffies(1000))
0159 #define DEFAULT_QSPI_DMA_BUF_LEN        (64 * 1024)
0160 #define CMD_TRANSFER                0
0161 #define ADDR_TRANSFER               1
0162 #define DATA_TRANSFER               2
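/*
 * Editor's note: these phase indices mirror the order in which
 * tegra_qspi_combined_seq_xfer() below expects the transfers of a
 * combined-sequence message: command first, then address, then data.
 */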
0163 
0164 struct tegra_qspi_soc_data {
0165     bool has_dma;
0166     bool cmb_xfer_capable;
0167     unsigned int cs_count;
0168 };
0169 
0170 struct tegra_qspi_client_data {
0171     u32 tx_clk_tap_delay;
0172     u32 rx_clk_tap_delay;
0173 };
0174 
0175 struct tegra_qspi {
0176     struct device               *dev;
0177     struct spi_master           *master;
0178     /* lock to protect data accessed by irq */
0179     spinlock_t              lock;
0180 
0181     struct clk              *clk;
0182     void __iomem                *base;
0183     phys_addr_t             phys;
0184     unsigned int                irq;
0185 
0186     u32                 cur_speed;
0187     unsigned int                cur_pos;
0188     unsigned int                words_per_32bit;
0189     unsigned int                bytes_per_word;
0190     unsigned int                curr_dma_words;
0191     unsigned int                cur_direction;
0192 
0193     unsigned int                cur_rx_pos;
0194     unsigned int                cur_tx_pos;
0195 
0196     unsigned int                dma_buf_size;
0197     unsigned int                max_buf_size;
0198     bool                    is_curr_dma_xfer;
0199 
0200     struct completion           rx_dma_complete;
0201     struct completion           tx_dma_complete;
0202 
0203     u32                 tx_status;
0204     u32                 rx_status;
0205     u32                 status_reg;
0206     bool                    is_packed;
0207     bool                    use_dma;
0208 
0209     u32                 command1_reg;
0210     u32                 dma_control_reg;
0211     u32                 def_command1_reg;
0212     u32                 def_command2_reg;
0213     u32                 spi_cs_timing1;
0214     u32                 spi_cs_timing2;
0215     u8                  dummy_cycles;
0216 
0217     struct completion           xfer_completion;
0218     struct spi_transfer         *curr_xfer;
0219 
0220     struct dma_chan             *rx_dma_chan;
0221     u32                 *rx_dma_buf;
0222     dma_addr_t              rx_dma_phys;
0223     struct dma_async_tx_descriptor      *rx_dma_desc;
0224 
0225     struct dma_chan             *tx_dma_chan;
0226     u32                 *tx_dma_buf;
0227     dma_addr_t              tx_dma_phys;
0228     struct dma_async_tx_descriptor      *tx_dma_desc;
0229     const struct tegra_qspi_soc_data    *soc_data;
0230 };
0231 
0232 static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
0233 {
0234     return readl(tqspi->base + offset);
0235 }
0236 
0237 static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
0238 {
0239     writel(value, tqspi->base + offset);
0240 
0241     /* read back a register to make sure that the register writes completed */
0242     if (offset != QSPI_TX_FIFO)
0243         readl(tqspi->base + QSPI_COMMAND1);
0244 }
0245 
0246 static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
0247 {
0248     u32 value;
0249 
0250     /* write 1 to clear status register */
0251     value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
0252     tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
0253 
0254     value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
0255     if (!(value & QSPI_INTR_RDY_MASK)) {
0256         value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
0257         tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
0258     }
0259 
0260     /* clear fifo status error if any */
0261     value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
0262     if (value & QSPI_ERR)
0263         tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
0264 }
0265 
0266 static unsigned int
0267 tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
0268 {
0269     unsigned int max_word, max_len, total_fifo_words;
0270     unsigned int remain_len = t->len - tqspi->cur_pos;
0271     unsigned int bits_per_word = t->bits_per_word;
0272 
0273     tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
0274 
0275     /*
0276      * The Tegra QSPI controller supports packed and unpacked mode
0277      * transfers. Packed mode is used for transfers of 8, 16 or 32 bits
0278      * per word with a length of at least one FIFO word (4 bytes);
0279      * unpacked mode is used for all other transfers.
0280      */
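    /*
     * Worked example (editor's illustration): a 100-byte transfer at
     * 8 bits per word uses packed mode with words_per_32bit = 4, so each
     * 32-bit FIFO word carries four packets; a transfer using 24 bits
     * per word falls back to unpacked mode, one packet per FIFO word.
     */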
0281 
0282     if ((bits_per_word == 8 || bits_per_word == 16 ||
0283          bits_per_word == 32) && t->len > 3) {
0284         tqspi->is_packed = true;
0285         tqspi->words_per_32bit = 32 / bits_per_word;
0286     } else {
0287         tqspi->is_packed = false;
0288         tqspi->words_per_32bit = 1;
0289     }
0290 
0291     if (tqspi->is_packed) {
0292         max_len = min(remain_len, tqspi->max_buf_size);
0293         tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
0294         total_fifo_words = (max_len + 3) / 4;
0295     } else {
0296         max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
0297         max_word = min(max_word, tqspi->max_buf_size / 4);
0298         tqspi->curr_dma_words = max_word;
0299         total_fifo_words = max_word;
0300     }
0301 
0302     return total_fifo_words;
0303 }
0304 
0305 static unsigned int
0306 tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
0307 {
0308     unsigned int written_words, fifo_words_left, count;
0309     unsigned int len, tx_empty_count, max_n_32bit, i;
0310     u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
0311     u32 fifo_status;
0312 
0313     fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
0314     tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
0315 
0316     if (tqspi->is_packed) {
0317         fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
0318         written_words = min(fifo_words_left, tqspi->curr_dma_words);
0319         len = written_words * tqspi->bytes_per_word;
0320         max_n_32bit = DIV_ROUND_UP(len, 4);
0321         for (count = 0; count < max_n_32bit; count++) {
0322             u32 x = 0;
0323 
0324             for (i = 0; (i < 4) && len; i++, len--)
0325                 x |= (u32)(*tx_buf++) << (i * 8);
0326             tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
0327         }
0328 
0329         tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
0330     } else {
0331         unsigned int write_bytes;
0332         u8 bytes_per_word = tqspi->bytes_per_word;
0333 
0334         max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
0335         written_words = max_n_32bit;
0336         len = written_words * tqspi->bytes_per_word;
0337         if (len > t->len - tqspi->cur_pos)
0338             len = t->len - tqspi->cur_pos;
0339         write_bytes = len;
0340         for (count = 0; count < max_n_32bit; count++) {
0341             u32 x = 0;
0342 
0343             for (i = 0; len && (i < bytes_per_word); i++, len--)
0344                 x |= (u32)(*tx_buf++) << (i * 8);
0345             tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
0346         }
0347 
0348         tqspi->cur_tx_pos += write_bytes;
0349     }
0350 
0351     return written_words;
0352 }
0353 
0354 static unsigned int
0355 tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
0356 {
0357     u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
0358     unsigned int len, rx_full_count, count, i;
0359     unsigned int read_words = 0;
0360     u32 fifo_status, x;
0361 
0362     fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
0363     rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
0364     if (tqspi->is_packed) {
0365         len = tqspi->curr_dma_words * tqspi->bytes_per_word;
0366         for (count = 0; count < rx_full_count; count++) {
0367             x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
0368 
0369             for (i = 0; len && (i < 4); i++, len--)
0370                 *rx_buf++ = (x >> (i * 8)) & 0xff;
0371         }
0372 
0373         read_words += tqspi->curr_dma_words;
0374         tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
0375     } else {
0376         u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
0377         u8 bytes_per_word = tqspi->bytes_per_word;
0378         unsigned int read_bytes;
0379 
0380         len = rx_full_count * bytes_per_word;
0381         if (len > t->len - tqspi->cur_pos)
0382             len = t->len - tqspi->cur_pos;
0383         read_bytes = len;
0384         for (count = 0; count < rx_full_count; count++) {
0385             x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
0386 
0387             for (i = 0; len && (i < bytes_per_word); i++, len--)
0388                 *rx_buf++ = (x >> (i * 8)) & 0xff;
0389         }
0390 
0391         read_words += rx_full_count;
0392         tqspi->cur_rx_pos += read_bytes;
0393     }
0394 
0395     return read_words;
0396 }
0397 
0398 static void
0399 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
0400 {
0401     dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
0402                 tqspi->dma_buf_size, DMA_TO_DEVICE);
0403 
0404     /*
0405      * In packed mode, each FIFO word may contain multiple packets,
0406      * depending on bits per word, so every byte in a FIFO word is valid.
0407      *
0408      * In unpacked mode, each FIFO word contains a single packet; any
0409      * remaining bits in the word are ignored by the hardware and
0410      * carry no data.
0411      */
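    /*
     * Editor's sketch (illustrative, not driver code): with
     * bits_per_word = 24, each tx_dma_buf[] word holds one 3-byte packet
     * in its low bits and the top byte is don't-care:
     *
     *     tx_dma_buf[n] = buf[2] << 16 | buf[1] << 8 | buf[0];
     */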
0412     if (tqspi->is_packed) {
0413         tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
0414     } else {
0415         u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
0416         unsigned int i, count, consume, write_bytes;
0417 
0418         /*
0419          * Fill tx_dma_buf with a single packet in each word, taken
0420          * from the SPI core tx_buf according to bits per word.
0421          */
0422         consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
0423         if (consume > t->len - tqspi->cur_pos)
0424             consume = t->len - tqspi->cur_pos;
0425         write_bytes = consume;
0426         for (count = 0; count < tqspi->curr_dma_words; count++) {
0427             u32 x = 0;
0428 
0429             for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
0430                 x |= (u32)(*tx_buf++) << (i * 8);
0431             tqspi->tx_dma_buf[count] = x;
0432         }
0433 
0434         tqspi->cur_tx_pos += write_bytes;
0435     }
0436 
0437     dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
0438                    tqspi->dma_buf_size, DMA_TO_DEVICE);
0439 }
0440 
0441 static void
0442 tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
0443 {
0444     dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
0445                 tqspi->dma_buf_size, DMA_FROM_DEVICE);
0446 
0447     if (tqspi->is_packed) {
0448         tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
0449     } else {
0450         unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
0451         u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
0452         unsigned int i, count, consume, read_bytes;
0453 
0454         /*
0455          * Each FIFO word contains a single data packet.
0456          * Skip the invalid bits in each FIFO word based on bits per
0457          * word and byte-align the data while filling the SPI core rx_buf.
0458          */
0459         consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
0460         if (consume > t->len - tqspi->cur_pos)
0461             consume = t->len - tqspi->cur_pos;
0462         read_bytes = consume;
0463         for (count = 0; count < tqspi->curr_dma_words; count++) {
0464             u32 x = tqspi->rx_dma_buf[count] & rx_mask;
0465 
0466             for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
0467                 *rx_buf++ = (x >> (i * 8)) & 0xff;
0468         }
0469 
0470         tqspi->cur_rx_pos += read_bytes;
0471     }
0472 
0473     dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
0474                    tqspi->dma_buf_size, DMA_FROM_DEVICE);
0475 }
0476 
0477 static void tegra_qspi_dma_complete(void *args)
0478 {
0479     struct completion *dma_complete = args;
0480 
0481     complete(dma_complete);
0482 }
0483 
0484 static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
0485 {
0486     dma_addr_t tx_dma_phys;
0487 
0488     reinit_completion(&tqspi->tx_dma_complete);
0489 
0490     if (tqspi->is_packed)
0491         tx_dma_phys = t->tx_dma;
0492     else
0493         tx_dma_phys = tqspi->tx_dma_phys;
0494 
0495     tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
0496                              len, DMA_MEM_TO_DEV,
0497                              DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
0498 
0499     if (!tqspi->tx_dma_desc) {
0500         dev_err(tqspi->dev, "Unable to get TX descriptor\n");
0501         return -EIO;
0502     }
0503 
0504     tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
0505     tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
0506     dmaengine_submit(tqspi->tx_dma_desc);
0507     dma_async_issue_pending(tqspi->tx_dma_chan);
0508 
0509     return 0;
0510 }
0511 
0512 static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
0513 {
0514     dma_addr_t rx_dma_phys;
0515 
0516     reinit_completion(&tqspi->rx_dma_complete);
0517 
0518     if (tqspi->is_packed)
0519         rx_dma_phys = t->rx_dma;
0520     else
0521         rx_dma_phys = tqspi->rx_dma_phys;
0522 
0523     tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
0524                              len, DMA_DEV_TO_MEM,
0525                              DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
0526 
0527     if (!tqspi->rx_dma_desc) {
0528         dev_err(tqspi->dev, "Unable to get RX descriptor\n");
0529         return -EIO;
0530     }
0531 
0532     tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
0533     tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
0534     dmaengine_submit(tqspi->rx_dma_desc);
0535     dma_async_issue_pending(tqspi->rx_dma_chan);
0536 
0537     return 0;
0538 }
0539 
0540 static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
0541 {
0542     void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
0543     u32 val;
0544 
0545     val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
0546     if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
0547         return 0;
0548 
0549     val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
0550     tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
0551 
0552     if (!atomic)
0553         return readl_relaxed_poll_timeout(addr, val,
0554                           (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
0555                           1000, 1000000);
0556 
0557     return readl_relaxed_poll_timeout_atomic(addr, val,
0558                          (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
0559                          1000, 1000000);
0560 }
0561 
0562 static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
0563 {
0564     u32 intr_mask;
0565 
0566     intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
0567     intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
0568     tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
0569 }
0570 
0571 static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
0572 {
0573     u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
0574     u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
0575     unsigned int len;
0576 
0577     len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
0578 
0579     if (t->tx_buf) {
0580         t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
0581         if (dma_mapping_error(tqspi->dev, t->tx_dma))
0582             return -ENOMEM;
0583     }
0584 
0585     if (t->rx_buf) {
0586         t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
0587         if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
0588             dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
0589             return -ENOMEM;
0590         }
0591     }
0592 
0593     return 0;
0594 }
0595 
0596 static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
0597 {
0598     unsigned int len;
0599 
0600     len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
0601 
0602     dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
0603     dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
0604 }
0605 
0606 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
0607 {
0608     struct dma_slave_config dma_sconfig = { 0 };
0609     unsigned int len;
0610     u8 dma_burst;
0611     int ret = 0;
0612     u32 val;
0613 
0614     if (tqspi->is_packed) {
0615         ret = tegra_qspi_dma_map_xfer(tqspi, t);
0616         if (ret < 0)
0617             return ret;
0618     }
0619 
0620     val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
0621     tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
0622 
0623     tegra_qspi_unmask_irq(tqspi);
0624 
0625     if (tqspi->is_packed)
0626         len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
0627     else
0628         len = tqspi->curr_dma_words * 4;
0629 
0630     /* set the FIFO trigger (attention) level based on the transfer length */
0631     val = 0;
0632     if (len & 0xf) {
0633         val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
0634         dma_burst = 1;
0635     } else if ((len >> 4) & 0x1) {
0636         val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
0637         dma_burst = 4;
0638     } else {
0639         val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
0640         dma_burst = 8;
0641     }
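    /*
     * Editor's example: len = 100 (not 16-byte aligned) selects a 1-word
     * trigger and burst; len = 16 or 48 (multiple of 16 but not of 32)
     * selects 4 words; len = 32, 64, ... selects 8 words.
     */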
0642 
0643     tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
0644     tqspi->dma_control_reg = val;
0645 
0646     dma_sconfig.device_fc = true;
0647     if (tqspi->cur_direction & DATA_DIR_TX) {
0648         dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
0649         dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
0650         dma_sconfig.dst_maxburst = dma_burst;
0651         ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
0652         if (ret < 0) {
0653             dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
0654             return ret;
0655         }
0656 
0657         tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
0658         ret = tegra_qspi_start_tx_dma(tqspi, t, len);
0659         if (ret < 0) {
0660             dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
0661             return ret;
0662         }
0663     }
0664 
0665     if (tqspi->cur_direction & DATA_DIR_RX) {
0666         dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
0667         dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
0668         dma_sconfig.src_maxburst = dma_burst;
0669         ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
0670         if (ret < 0) {
0671             dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
0672             return ret;
0673         }
0674 
0675         dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
0676                        tqspi->dma_buf_size,
0677                        DMA_FROM_DEVICE);
0678 
0679         ret = tegra_qspi_start_rx_dma(tqspi, t, len);
0680         if (ret < 0) {
0681             dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
0682             if (tqspi->cur_direction & DATA_DIR_TX)
0683                 dmaengine_terminate_all(tqspi->tx_dma_chan);
0684             return ret;
0685         }
0686     }
0687 
0688     tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
0689 
0690     tqspi->is_curr_dma_xfer = true;
0691     tqspi->dma_control_reg = val;
0692     val |= QSPI_DMA_EN;
0693     tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
0694 
0695     return ret;
0696 }
0697 
0698 static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
0699 {
0700     u32 val;
0701     unsigned int cur_words;
0702 
0703     if (qspi->cur_direction & DATA_DIR_TX)
0704         cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
0705     else
0706         cur_words = qspi->curr_dma_words;
0707 
0708     val = QSPI_DMA_BLK_SET(cur_words - 1);
0709     tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
0710 
0711     tegra_qspi_unmask_irq(qspi);
0712 
0713     qspi->is_curr_dma_xfer = false;
0714     val = qspi->command1_reg;
0715     val |= QSPI_PIO;
0716     tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
0717 
0718     return 0;
0719 }
0720 
0721 static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
0722 {
0723     if (tqspi->tx_dma_buf) {
0724         dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
0725                   tqspi->tx_dma_buf, tqspi->tx_dma_phys);
0726         tqspi->tx_dma_buf = NULL;
0727     }
0728 
0729     if (tqspi->tx_dma_chan) {
0730         dma_release_channel(tqspi->tx_dma_chan);
0731         tqspi->tx_dma_chan = NULL;
0732     }
0733 
0734     if (tqspi->rx_dma_buf) {
0735         dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
0736                   tqspi->rx_dma_buf, tqspi->rx_dma_phys);
0737         tqspi->rx_dma_buf = NULL;
0738     }
0739 
0740     if (tqspi->rx_dma_chan) {
0741         dma_release_channel(tqspi->rx_dma_chan);
0742         tqspi->rx_dma_chan = NULL;
0743     }
0744 }
0745 
0746 static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
0747 {
0748     struct dma_chan *dma_chan;
0749     dma_addr_t dma_phys;
0750     u32 *dma_buf;
0751     int err;
0752 
0753     dma_chan = dma_request_chan(tqspi->dev, "rx");
0754     if (IS_ERR(dma_chan)) {
0755         err = PTR_ERR(dma_chan);
0756         goto err_out;
0757     }
0758 
0759     tqspi->rx_dma_chan = dma_chan;
0760 
0761     dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
0762     if (!dma_buf) {
0763         err = -ENOMEM;
0764         goto err_out;
0765     }
0766 
0767     tqspi->rx_dma_buf = dma_buf;
0768     tqspi->rx_dma_phys = dma_phys;
0769 
0770     dma_chan = dma_request_chan(tqspi->dev, "tx");
0771     if (IS_ERR(dma_chan)) {
0772         err = PTR_ERR(dma_chan);
0773         goto err_out;
0774     }
0775 
0776     tqspi->tx_dma_chan = dma_chan;
0777 
0778     dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
0779     if (!dma_buf) {
0780         err = -ENOMEM;
0781         goto err_out;
0782     }
0783 
0784     tqspi->tx_dma_buf = dma_buf;
0785     tqspi->tx_dma_phys = dma_phys;
0786     tqspi->use_dma = true;
0787 
0788     return 0;
0789 
0790 err_out:
0791     tegra_qspi_deinit_dma(tqspi);
0792 
0793     if (err != -EPROBE_DEFER) {
0794         dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
0795         dev_err(tqspi->dev, "falling back to PIO\n");
0796         return 0;
0797     }
0798 
0799     return err;
0800 }
0801 
0802 static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
0803                      bool is_first_of_msg)
0804 {
0805     struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
0806     struct tegra_qspi_client_data *cdata = spi->controller_data;
0807     u32 command1, command2, speed = t->speed_hz;
0808     u8 bits_per_word = t->bits_per_word;
0809     u32 tx_tap = 0, rx_tap = 0;
0810     int req_mode;
0811 
0812     if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
0813         clk_set_rate(tqspi->clk, speed);
0814         tqspi->cur_speed = speed;
0815     }
0816 
0817     tqspi->cur_pos = 0;
0818     tqspi->cur_rx_pos = 0;
0819     tqspi->cur_tx_pos = 0;
0820     tqspi->curr_xfer = t;
0821 
0822     if (is_first_of_msg) {
0823         tegra_qspi_mask_clear_irq(tqspi);
0824 
0825         command1 = tqspi->def_command1_reg;
0826         command1 |= QSPI_CS_SEL(spi->chip_select);
0827         command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
0828 
0829         command1 &= ~QSPI_CONTROL_MODE_MASK;
0830         req_mode = spi->mode & 0x3;
0831         if (req_mode == SPI_MODE_3)
0832             command1 |= QSPI_CONTROL_MODE_3;
0833         else
0834             command1 |= QSPI_CONTROL_MODE_0;
0835 
0836         if (spi->mode & SPI_CS_HIGH)
0837             command1 |= QSPI_CS_SW_VAL;
0838         else
0839             command1 &= ~QSPI_CS_SW_VAL;
0840         tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
0841 
0842         if (cdata && cdata->tx_clk_tap_delay)
0843             tx_tap = cdata->tx_clk_tap_delay;
0844 
0845         if (cdata && cdata->rx_clk_tap_delay)
0846             rx_tap = cdata->rx_clk_tap_delay;
0847 
0848         command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
0849         if (command2 != tqspi->def_command2_reg)
0850             tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
0851 
0852     } else {
0853         command1 = tqspi->command1_reg;
0854         command1 &= ~QSPI_BIT_LENGTH(~0);
0855         command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
0856     }
0857 
0858     command1 &= ~QSPI_SDR_DDR_SEL;
0859 
0860     return command1;
0861 }
0862 
0863 static int tegra_qspi_start_transfer_one(struct spi_device *spi,
0864                      struct spi_transfer *t, u32 command1)
0865 {
0866     struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
0867     unsigned int total_fifo_words;
0868     u8 bus_width = 0;
0869     int ret;
0870 
0871     total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
0872 
0873     command1 &= ~QSPI_PACKED;
0874     if (tqspi->is_packed)
0875         command1 |= QSPI_PACKED;
0876     tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
0877 
0878     tqspi->cur_direction = 0;
0879 
0880     command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
0881     if (t->rx_buf) {
0882         command1 |= QSPI_RX_EN;
0883         tqspi->cur_direction |= DATA_DIR_RX;
0884         bus_width = t->rx_nbits;
0885     }
0886 
0887     if (t->tx_buf) {
0888         command1 |= QSPI_TX_EN;
0889         tqspi->cur_direction |= DATA_DIR_TX;
0890         bus_width = t->tx_nbits;
0891     }
0892 
0893     command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
0894 
0895     if (bus_width == SPI_NBITS_QUAD)
0896         command1 |= QSPI_INTERFACE_WIDTH_QUAD;
0897     else if (bus_width == SPI_NBITS_DUAL)
0898         command1 |= QSPI_INTERFACE_WIDTH_DUAL;
0899     else
0900         command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
0901 
0902     tqspi->command1_reg = command1;
0903 
0904     tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
0905 
0906     ret = tegra_qspi_flush_fifos(tqspi, false);
0907     if (ret < 0)
0908         return ret;
0909 
0910     if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
0911         ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
0912     else
0913         ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
0914 
0915     return ret;
0916 }
0917 
0918 static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
0919 {
0920     struct tegra_qspi_client_data *cdata;
0921 
0922     cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
0923     if (!cdata)
0924         return NULL;
0925 
0926     device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
0927                  &cdata->tx_clk_tap_delay);
0928     device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
0929                  &cdata->rx_clk_tap_delay);
0930 
0931     return cdata;
0932 }
0933 
0934 static int tegra_qspi_setup(struct spi_device *spi)
0935 {
0936     struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
0937     struct tegra_qspi_client_data *cdata = spi->controller_data;
0938     unsigned long flags;
0939     u32 val;
0940     int ret;
0941 
0942     ret = pm_runtime_resume_and_get(tqspi->dev);
0943     if (ret < 0) {
0944         dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
0945         return ret;
0946     }
0947 
0948     if (!cdata) {
0949         cdata = tegra_qspi_parse_cdata_dt(spi);
0950         spi->controller_data = cdata;
0951     }
0952     spin_lock_irqsave(&tqspi->lock, flags);
0953 
0954     /* keep the default CS state inactive */
0955     val = tqspi->def_command1_reg;
0956     val |= QSPI_CS_SEL(spi->chip_select);
0957     if (spi->mode & SPI_CS_HIGH)
0958         val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select);
0959     else
0960         val |= QSPI_CS_POL_INACTIVE(spi->chip_select);
0961 
0962     tqspi->def_command1_reg = val;
0963     tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
0964 
0965     spin_unlock_irqrestore(&tqspi->lock, flags);
0966 
0967     pm_runtime_put(tqspi->dev);
0968 
0969     return 0;
0970 }
0971 
0972 static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
0973 {
0974     dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
0975     dev_dbg(tqspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
0976         tegra_qspi_readl(tqspi, QSPI_COMMAND1),
0977         tegra_qspi_readl(tqspi, QSPI_COMMAND2));
0978     dev_dbg(tqspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
0979         tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
0980         tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
0981     dev_dbg(tqspi->dev, "INTR_MASK:   0x%08x | MISC:        0x%08x\n",
0982         tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
0983         tegra_qspi_readl(tqspi, QSPI_MISC_REG));
0984     dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
0985         tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
0986         tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
0987 }
0988 
0989 static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
0990 {
0991     dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
0992     tegra_qspi_dump_regs(tqspi);
0993     tegra_qspi_flush_fifos(tqspi, true);
0994     if (device_reset(tqspi->dev) < 0)
0995         dev_warn_once(tqspi->dev, "device reset failed\n");
0996 }
0997 
0998 static void tegra_qspi_transfer_end(struct spi_device *spi)
0999 {
1000     struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
1001     int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
1002 
1003     if (cs_val)
1004         tqspi->command1_reg |= QSPI_CS_SW_VAL;
1005     else
1006         tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
1007     tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1008     tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1009 }
1010 
1011 static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
1012 {
1013     u32 cmd_config = 0;
1014 
1015     /* Build the command configuration */
1016     if (is_ddr)
1017         cmd_config |= QSPI_COMMAND_SDR_DDR;
1018     else
1019         cmd_config &= ~QSPI_COMMAND_SDR_DDR;
1020 
1021     cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
1022     cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
1023 
1024     return cmd_config;
1025 }
1026 
1027 static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
1028 {
1029     u32 addr_config = 0;
1030 
1031     /* Build the address configuration; only X1 SDR mode is supported */
1032     is_ddr = false;
1033     bus_width = 0;
1034 
1035     if (is_ddr)
1036         addr_config |= QSPI_ADDRESS_SDR_DDR;
1037     else
1038         addr_config &= ~QSPI_ADDRESS_SDR_DDR;
1039 
1040     addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
1041     addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
1042 
1043     return addr_config;
1044 }
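/*
 * Editor's example of the two helpers above: a 1-byte opcode in X1 SDR
 * mode yields cmd_config = QSPI_COMMAND_SIZE_SET(7) with all other bits
 * clear, and a 4-byte address yields addr_config = QSPI_ADDRESS_SIZE_SET(31).
 */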
1045 
1046 static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
1047                     struct spi_message *msg)
1048 {
1049     bool is_first_msg = true;
1050     struct spi_transfer *xfer;
1051     struct spi_device *spi = msg->spi;
1052     u8 transfer_phase = 0;
1053     u32 cmd1 = 0, dma_ctl = 0;
1054     int ret = 0;
1055     u32 address_value = 0, val = 0; /* val holds the full QSPI_GLOBAL_CONFIG */
1056     u32 cmd_config = 0, addr_config = 0;
1057     u8 cmd_value = 0;
1058 
1059     /* Enable Combined sequence mode */
1060     val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
1061     val |= QSPI_CMB_SEQ_EN;
1062     tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
1063     /* Process individual transfer list */
1064     list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1065         switch (transfer_phase) {
1066         case CMD_TRANSFER:
1067             /* X1 SDR mode */
1068             cmd_config = tegra_qspi_cmd_config(false, 0,
1069                                xfer->len);
1070             cmd_value = *((const u8 *)(xfer->tx_buf));
1071             break;
1072         case ADDR_TRANSFER:
1073             /* X1 SDR mode */
1074             addr_config = tegra_qspi_addr_config(false, 0,
1075                                  xfer->len);
1076             address_value = *((const u32 *)(xfer->tx_buf));
1077             break;
1078         case DATA_TRANSFER:
1079             /* Program Command, Address value in register */
1080             tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
1081             tegra_qspi_writel(tqspi, address_value,
1082                       QSPI_CMB_SEQ_ADDR);
1083             /* Program Command and Address config in register */
1084             tegra_qspi_writel(tqspi, cmd_config,
1085                       QSPI_CMB_SEQ_CMD_CFG);
1086             tegra_qspi_writel(tqspi, addr_config,
1087                       QSPI_CMB_SEQ_ADDR_CFG);
1088 
1089             reinit_completion(&tqspi->xfer_completion);
1090             cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
1091                                  is_first_msg);
1092             ret = tegra_qspi_start_transfer_one(spi, xfer,
1093                                 cmd1);
1094 
1095             if (ret < 0) {
1096                 dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
1097                     ret);
1098                 return ret;
1099             }
1100 
1101             is_first_msg = false;
1102             ret = wait_for_completion_timeout(&tqspi->xfer_completion,
1103                               QSPI_DMA_TIMEOUT);
1104 
1105             if (WARN_ON(ret == 0)) {
1106                 dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
1107                     ret);
1108                 if (tqspi->is_curr_dma_xfer &&
1109                     (tqspi->cur_direction & DATA_DIR_TX))
1110                     dmaengine_terminate_all(tqspi->tx_dma_chan);
1111 
1112                 if (tqspi->is_curr_dma_xfer &&
1113                     (tqspi->cur_direction & DATA_DIR_RX))
1114                     dmaengine_terminate_all(tqspi->rx_dma_chan);
1115 
1116                 /* Abort transfer by resetting pio/dma bit */
1117                 if (!tqspi->is_curr_dma_xfer) {
1118                     cmd1 = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
1119                     cmd1 &= ~QSPI_PIO;
1120                     tegra_qspi_writel(tqspi, cmd1, QSPI_COMMAND1);
1121                 } else {
1122                     dma_ctl = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
1123                     dma_ctl &= ~QSPI_DMA_EN;
1124                     tegra_qspi_writel(tqspi, dma_ctl, QSPI_DMA_CTL);
1125                 }
1136 
1137                 /* Reset controller if timeout happens */
1138                 if (device_reset(tqspi->dev) < 0)
1139                     dev_warn_once(tqspi->dev,
1140                               "device reset failed\n");
1141                 ret = -EIO;
1142                 goto exit;
1143             }
1144 
1145             if (tqspi->tx_status || tqspi->rx_status) {
1146                 dev_err(tqspi->dev, "QSPI Transfer failed\n");
1147                 tqspi->tx_status = 0;
1148                 tqspi->rx_status = 0;
1149                 ret = -EIO;
1150                 goto exit;
1151             }
1152             break;
1153         default:
1154             ret = -EINVAL;
1155             goto exit;
1156         }
1157         msg->actual_length += xfer->len;
1158         transfer_phase++;
1159     }
1160 
1161 exit:
1162     msg->status = ret;
1163 
1164     return ret;
1165 }
1166 
1167 static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
1168                         struct spi_message *msg)
1169 {
1170     struct spi_device *spi = msg->spi;
1171     struct spi_transfer *transfer;
1172     bool is_first_msg = true;
1173     int ret = 0, val = 0;
1174 
1175     msg->status = 0;
1176     msg->actual_length = 0;
1177     tqspi->tx_status = 0;
1178     tqspi->rx_status = 0;
1179 
1180     /* Disable Combined sequence mode */
1181     val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
1182     val &= ~QSPI_CMB_SEQ_EN;
1183     tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
1184     list_for_each_entry(transfer, &msg->transfers, transfer_list) {
1185         struct spi_transfer *xfer = transfer;
1186         u8 dummy_bytes = 0;
1187         u32 cmd1;
1188 
1189         tqspi->dummy_cycles = 0;
1190         /*
1191          * The Tegra QSPI hardware can send dummy bytes after the actual data,
1192          * based on the dummy clock cycles programmed in the QSPI_MISC register.
1193          * So, if the next transfer is a dummy-data transfer, program its dummy
1194          * cycles along with the current transfer and skip the dummy transfer.
1195          */
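        /*
         * Editor's illustration: a flash fast read is typically sent as
         * cmd + addr + dummy + data transfers; a 1-byte X1 dummy transfer
         * (8 clock cycles) sets tqspi->dummy_cycles = 8 while programming
         * the current transfer, and the dummy entry itself is skipped.
         */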
1196         if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
1197             struct spi_transfer *next_xfer;
1198 
1199             next_xfer = list_next_entry(xfer, transfer_list);
1200             if (next_xfer->dummy_data) {
1201                 u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
1202 
1203                 if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
1204                     tqspi->dummy_cycles = dummy_cycles;
1205                     dummy_bytes = next_xfer->len;
1206                     transfer = next_xfer;
1207                 }
1208             }
1209         }
1210 
1211         reinit_completion(&tqspi->xfer_completion);
1212 
1213         cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
1214 
1215         ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
1216         if (ret < 0) {
1217             dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
1218             goto complete_xfer;
1219         }
1220 
1221         ret = wait_for_completion_timeout(&tqspi->xfer_completion,
1222                           QSPI_DMA_TIMEOUT);
1223         if (WARN_ON(ret == 0)) {
1224             dev_err(tqspi->dev, "transfer timeout\n");
1225             if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
1226                 dmaengine_terminate_all(tqspi->tx_dma_chan);
1227             if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
1228                 dmaengine_terminate_all(tqspi->rx_dma_chan);
1229             tegra_qspi_handle_error(tqspi);
1230             ret = -EIO;
1231             goto complete_xfer;
1232         }
1233 
1234         if (tqspi->tx_status || tqspi->rx_status) {
1235             tegra_qspi_handle_error(tqspi);
1236             ret = -EIO;
1237             goto complete_xfer;
1238         }
1239 
1240         msg->actual_length += xfer->len + dummy_bytes;
1241 
1242 complete_xfer:
1243         if (ret < 0) {
1244             tegra_qspi_transfer_end(spi);
1245             spi_transfer_delay_exec(xfer);
1246             goto exit;
1247         }
1248 
1249         if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
1250             /* de-activate CS after last transfer only when cs_change is not set */
1251             if (!xfer->cs_change) {
1252                 tegra_qspi_transfer_end(spi);
1253                 spi_transfer_delay_exec(xfer);
1254             }
1255         } else if (xfer->cs_change) {
1256             /* de-activate CS between transfers only when cs_change is set */
1257             tegra_qspi_transfer_end(spi);
1258             spi_transfer_delay_exec(xfer);
1259         }
1260     }
1261 
1262     ret = 0;
1263 exit:
1264     msg->status = ret;
1265 
1266     return ret;
1267 }
1268 
1269 static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
1270                     struct spi_message *msg)
1271 {
1272     int transfer_count = 0;
1273     struct spi_transfer *xfer;
1274 
1275     list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1276         transfer_count++;
1277     }
1278     if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
1279         return false;
1280     xfer = list_first_entry(&msg->transfers, typeof(*xfer),
1281                 transfer_list);
1282     if (xfer->len > 2)
1283         return false;
1284     xfer = list_next_entry(xfer, transfer_list);
1285     if (xfer->len > 4 || xfer->len < 3)
1286         return false;
1287     xfer = list_next_entry(xfer, transfer_list);
1288     if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
1289         return false;
1290 
1291     return true;
1292 }
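/*
 * Editor's sketch of a message that passes the checks above (names and
 * values are illustrative only): three transfers are required, a command
 * of at most two bytes, a 3- or 4-byte address, and up to
 * QSPI_FIFO_DEPTH << 2 (256) bytes of data, for example:
 *
 *     struct spi_transfer xfers[3] = {
 *         { .tx_buf = &opcode, .len = 1 },
 *         { .tx_buf = addr,    .len = 3 },
 *         { .rx_buf = data,    .len = 256 },
 *     };
 */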
1293 
1294 static int tegra_qspi_transfer_one_message(struct spi_master *master,
1295                        struct spi_message *msg)
1296 {
1297     struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1298     int ret;
1299 
1300     if (tegra_qspi_validate_cmb_seq(tqspi, msg))
1301         ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
1302     else
1303         ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
1304 
1305     spi_finalize_current_message(master);
1306 
1307     return ret;
1308 }
1309 
1310 static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
1311 {
1312     struct spi_transfer *t = tqspi->curr_xfer;
1313     unsigned long flags;
1314 
1315     spin_lock_irqsave(&tqspi->lock, flags);
1316 
1317     if (tqspi->tx_status || tqspi->rx_status) {
1318         tegra_qspi_handle_error(tqspi);
1319         complete(&tqspi->xfer_completion);
1320         goto exit;
1321     }
1322 
1323     if (tqspi->cur_direction & DATA_DIR_RX)
1324         tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
1325 
1326     if (tqspi->cur_direction & DATA_DIR_TX)
1327         tqspi->cur_pos = tqspi->cur_tx_pos;
1328     else
1329         tqspi->cur_pos = tqspi->cur_rx_pos;
1330 
1331     if (tqspi->cur_pos == t->len) {
1332         complete(&tqspi->xfer_completion);
1333         goto exit;
1334     }
1335 
1336     tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1337     tegra_qspi_start_cpu_based_transfer(tqspi, t);
1338 exit:
1339     spin_unlock_irqrestore(&tqspi->lock, flags);
1340     return IRQ_HANDLED;
1341 }
1342 
1343 static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
1344 {
1345     struct spi_transfer *t = tqspi->curr_xfer;
1346     unsigned int total_fifo_words;
1347     unsigned long flags;
1348     long wait_status;
1349     int err = 0;
1350 
1351     if (tqspi->cur_direction & DATA_DIR_TX) {
1352         if (tqspi->tx_status) {
1353             dmaengine_terminate_all(tqspi->tx_dma_chan);
1354             err += 1;
1355         } else {
1356             wait_status = wait_for_completion_interruptible_timeout(
1357                 &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
1358             if (wait_status <= 0) {
1359                 dmaengine_terminate_all(tqspi->tx_dma_chan);
1360                 dev_err(tqspi->dev, "failed TX DMA transfer\n");
1361                 err += 1;
1362             }
1363         }
1364     }
1365 
1366     if (tqspi->cur_direction & DATA_DIR_RX) {
1367         if (tqspi->rx_status) {
1368             dmaengine_terminate_all(tqspi->rx_dma_chan);
1369             err += 2;
1370         } else {
1371             wait_status = wait_for_completion_interruptible_timeout(
1372                 &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
1373             if (wait_status <= 0) {
1374                 dmaengine_terminate_all(tqspi->rx_dma_chan);
1375                 dev_err(tqspi->dev, "failed RX DMA transfer\n");
1376                 err += 2;
1377             }
1378         }
1379     }
1380 
1381     spin_lock_irqsave(&tqspi->lock, flags);
1382 
1383     if (err) {
1384         tegra_qspi_dma_unmap_xfer(tqspi, t);
1385         tegra_qspi_handle_error(tqspi);
1386         complete(&tqspi->xfer_completion);
1387         goto exit;
1388     }
1389 
1390     if (tqspi->cur_direction & DATA_DIR_RX)
1391         tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
1392 
1393     if (tqspi->cur_direction & DATA_DIR_TX)
1394         tqspi->cur_pos = tqspi->cur_tx_pos;
1395     else
1396         tqspi->cur_pos = tqspi->cur_rx_pos;
1397 
1398     if (tqspi->cur_pos == t->len) {
1399         tegra_qspi_dma_unmap_xfer(tqspi, t);
1400         complete(&tqspi->xfer_completion);
1401         goto exit;
1402     }
1403 
1404     tegra_qspi_dma_unmap_xfer(tqspi, t);
1405 
1406     /* continue transfer in current message */
1407     total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1408     if (total_fifo_words > QSPI_FIFO_DEPTH)
1409         err = tegra_qspi_start_dma_based_transfer(tqspi, t);
1410     else
1411         err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
1412 
1413 exit:
1414     spin_unlock_irqrestore(&tqspi->lock, flags);
1415     return IRQ_HANDLED;
1416 }
1417 
1418 static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
1419 {
1420     struct tegra_qspi *tqspi = context_data;
1421 
1422     tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
1423 
1424     if (tqspi->cur_direction & DATA_DIR_TX)
1425         tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
1426 
1427     if (tqspi->cur_direction & DATA_DIR_RX)
1428         tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
1429 
1430     tegra_qspi_mask_clear_irq(tqspi);
1431 
1432     if (!tqspi->is_curr_dma_xfer)
1433         return handle_cpu_based_xfer(tqspi);
1434 
1435     return handle_dma_based_xfer(tqspi);
1436 }
1437 
1438 static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
1439     .has_dma = true,
1440     .cmb_xfer_capable = false,
1441     .cs_count = 1,
1442 };
1443 
1444 static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
1445     .has_dma = true,
1446     .cmb_xfer_capable = true,
1447     .cs_count = 1,
1448 };
1449 
1450 static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
1451     .has_dma = false,
1452     .cmb_xfer_capable = true,
1453     .cs_count = 1,
1454 };
1455 
1456 static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
1457     .has_dma = false,
1458     .cmb_xfer_capable = true,
1459     .cs_count = 4,
1460 };
1461 
1462 static const struct of_device_id tegra_qspi_of_match[] = {
1463     {
1464         .compatible = "nvidia,tegra210-qspi",
1465         .data       = &tegra210_qspi_soc_data,
1466     }, {
1467         .compatible = "nvidia,tegra186-qspi",
1468         .data       = &tegra186_qspi_soc_data,
1469     }, {
1470         .compatible = "nvidia,tegra194-qspi",
1471         .data       = &tegra186_qspi_soc_data,
1472     }, {
1473         .compatible = "nvidia,tegra234-qspi",
1474         .data       = &tegra234_qspi_soc_data,
1475     }, {
1476         .compatible = "nvidia,tegra241-qspi",
1477         .data       = &tegra241_qspi_soc_data,
1478     },
1479     {}
1480 };
1481 
1482 MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
1483 
1484 #ifdef CONFIG_ACPI
1485 static const struct acpi_device_id tegra_qspi_acpi_match[] = {
1486     {
1487         .id = "NVDA1213",
1488         .driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
1489     }, {
1490         .id = "NVDA1313",
1491         .driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
1492     }, {
1493         .id = "NVDA1413",
1494         .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
1495     }, {
1496         .id = "NVDA1513",
1497         .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
1498     },
1499     {}
1500 };
1501 
1502 MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
1503 #endif
1504 
1505 static int tegra_qspi_probe(struct platform_device *pdev)
1506 {
1507     struct spi_master   *master;
1508     struct tegra_qspi   *tqspi;
1509     struct resource     *r;
1510     int ret, qspi_irq;
1511     int bus_num;
1512 
1513     master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
1514     if (!master)
1515         return -ENOMEM;
1516 
1517     platform_set_drvdata(pdev, master);
1518     tqspi = spi_master_get_devdata(master);
1519 
1520     master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
1521                 SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
1522     master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
1523     master->setup = tegra_qspi_setup;
1524     master->transfer_one_message = tegra_qspi_transfer_one_message;
1525     master->num_chipselect = 1;
1526     master->auto_runtime_pm = true;
1527 
1528     bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
1529     if (bus_num >= 0)
1530         master->bus_num = bus_num;
1531 
1532     tqspi->master = master;
1533     tqspi->dev = &pdev->dev;
1534     spin_lock_init(&tqspi->lock);
1535 
1536     tqspi->soc_data = device_get_match_data(&pdev->dev);
1537     master->num_chipselect = tqspi->soc_data->cs_count;
1538     r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1539     tqspi->base = devm_ioremap_resource(&pdev->dev, r);
1540     if (IS_ERR(tqspi->base))
1541         return PTR_ERR(tqspi->base);
1542 
1543     tqspi->phys = r->start;
1544     qspi_irq = platform_get_irq(pdev, 0);
1545     if (qspi_irq < 0)
1546         return qspi_irq;
1547     tqspi->irq = qspi_irq;
1548 
1549     if (!has_acpi_companion(tqspi->dev)) {
1550         tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
1551         if (IS_ERR(tqspi->clk)) {
1552             ret = PTR_ERR(tqspi->clk);
1553             dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
1554             return ret;
1555         }
1557     }
1558 
1559     tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
1560     tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
1561 
1562     ret = tegra_qspi_init_dma(tqspi);
1563     if (ret < 0)
1564         return ret;
1565 
1566     if (tqspi->use_dma)
1567         tqspi->max_buf_size = tqspi->dma_buf_size;
1568 
1569     init_completion(&tqspi->tx_dma_complete);
1570     init_completion(&tqspi->rx_dma_complete);
1571     init_completion(&tqspi->xfer_completion);
1572 
1573     pm_runtime_enable(&pdev->dev);
1574     ret = pm_runtime_resume_and_get(&pdev->dev);
1575     if (ret < 0) {
1576         dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
1577         goto exit_pm_disable;
1578     }
1579 
1580     if (device_reset(tqspi->dev) < 0)
1581         dev_warn_once(tqspi->dev, "device reset failed\n");
1582 
1583     tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
1584     tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1585     tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
1586     tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
1587     tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
1588 
1589     pm_runtime_put(&pdev->dev);
1590 
1591     ret = request_threaded_irq(tqspi->irq, NULL,
1592                    tegra_qspi_isr_thread, IRQF_ONESHOT,
1593                    dev_name(&pdev->dev), tqspi);
1594     if (ret < 0) {
1595         dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
1596         goto exit_pm_disable;
1597     }
1598 
1599     master->dev.of_node = pdev->dev.of_node;
1600     ret = spi_register_master(master);
1601     if (ret < 0) {
1602         dev_err(&pdev->dev, "failed to register master: %d\n", ret);
1603         goto exit_free_irq;
1604     }
1605 
1606     return 0;
1607 
1608 exit_free_irq:
1609     free_irq(qspi_irq, tqspi);
1610 exit_pm_disable:
1611     pm_runtime_force_suspend(&pdev->dev);
1612     tegra_qspi_deinit_dma(tqspi);
1613     return ret;
1614 }
1615 
1616 static int tegra_qspi_remove(struct platform_device *pdev)
1617 {
1618     struct spi_master *master = platform_get_drvdata(pdev);
1619     struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1620 
1621     spi_unregister_master(master);
1622     free_irq(tqspi->irq, tqspi);
1623     pm_runtime_force_suspend(&pdev->dev);
1624     tegra_qspi_deinit_dma(tqspi);
1625 
1626     return 0;
1627 }
1628 
1629 static int __maybe_unused tegra_qspi_suspend(struct device *dev)
1630 {
1631     struct spi_master *master = dev_get_drvdata(dev);
1632 
1633     return spi_master_suspend(master);
1634 }
1635 
1636 static int __maybe_unused tegra_qspi_resume(struct device *dev)
1637 {
1638     struct spi_master *master = dev_get_drvdata(dev);
1639     struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1640     int ret;
1641 
1642     ret = pm_runtime_resume_and_get(dev);
1643     if (ret < 0) {
1644         dev_err(dev, "failed to get runtime PM: %d\n", ret);
1645         return ret;
1646     }
1647 
1648     tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1649     tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
1650     pm_runtime_put(dev);
1651 
1652     return spi_master_resume(master);
1653 }
1654 
1655 static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
1656 {
1657     struct spi_master *master = dev_get_drvdata(dev);
1658     struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1659 
1660     /* Runtime pm disabled with ACPI */
1661     if (has_acpi_companion(tqspi->dev))
1662         return 0;
1663     /* flush all writes pending in the PPSB queue by reading back */
1664     tegra_qspi_readl(tqspi, QSPI_COMMAND1);
1665 
1666     clk_disable_unprepare(tqspi->clk);
1667 
1668     return 0;
1669 }
1670 
1671 static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
1672 {
1673     struct spi_master *master = dev_get_drvdata(dev);
1674     struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1675     int ret;
1676 
1677     /* Runtime pm disabled with ACPI */
1678     if (has_acpi_companion(tqspi->dev))
1679         return 0;
1680     ret = clk_prepare_enable(tqspi->clk);
1681     if (ret < 0)
1682         dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
1683 
1684     return ret;
1685 }
1686 
1687 static const struct dev_pm_ops tegra_qspi_pm_ops = {
1688     SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
1689     SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
1690 };
1691 
1692 static struct platform_driver tegra_qspi_driver = {
1693     .driver = {
1694         .name       = "tegra-qspi",
1695         .pm     = &tegra_qspi_pm_ops,
1696         .of_match_table = tegra_qspi_of_match,
1697         .acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
1698     },
1699     .probe =    tegra_qspi_probe,
1700     .remove =   tegra_qspi_remove,
1701 };
1702 module_platform_driver(tegra_qspi_driver);
1703 
1704 MODULE_ALIAS("platform:qspi-tegra");
1705 MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
1706 MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
1707 MODULE_LICENSE("GPL v2");