0001
0002
0003
0004
0005
0006 #include <linux/clk.h>
0007 #include <linux/delay.h>
0008 #include <linux/err.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/io.h>
0011 #include <linux/list.h>
0012 #include <linux/module.h>
0013 #include <linux/of.h>
0014 #include <linux/of_device.h>
0015 #include <linux/platform_device.h>
0016 #include <linux/pm_runtime.h>
0017 #include <linux/spi/spi.h>
0018 #include <linux/dmaengine.h>
0019 #include <linux/dma-mapping.h>
0020
/* QUP core register offsets */
#define QUP_CONFIG 0x0000
#define QUP_STATE 0x0004
#define QUP_IO_M_MODES 0x0008
#define QUP_SW_RESET 0x000c
#define QUP_OPERATIONAL 0x0018
#define QUP_ERROR_FLAGS 0x001c
#define QUP_ERROR_FLAGS_EN 0x0020
#define QUP_OPERATIONAL_MASK 0x0028
#define QUP_HW_VERSION 0x0030
#define QUP_MX_OUTPUT_CNT 0x0100
#define QUP_OUTPUT_FIFO 0x0110
#define QUP_MX_WRITE_CNT 0x0150
#define QUP_MX_INPUT_CNT 0x0200
#define QUP_MX_READ_CNT 0x0208
#define QUP_INPUT_FIFO 0x0218

/* SPI mini-core register offsets */
#define SPI_CONFIG 0x0300
#define SPI_IO_CONTROL 0x0304
#define SPI_ERROR_FLAGS 0x0308
#define SPI_ERROR_FLAGS_EN 0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE (1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
#define QUP_CONFIG_NO_INPUT BIT(7)
#define QUP_CONFIG_NO_OUTPUT BIT(6)
#define QUP_CONFIG_N 0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID BIT(2)
#define QUP_STATE_RESET 0
#define QUP_STATE_RUN 1
#define QUP_STATE_PAUSE 3
#define QUP_STATE_MASK 3
#define QUP_STATE_CLEAR 2

#define QUP_HW_VERSION_2_1_1 0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN BIT(15)
#define QUP_IO_M_UNPACK_EN BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

/* extractors for the block/FIFO geometry encoded in QUP_IO_M_MODES */
#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)

/* transfer modes programmed into the QUP_IO_M_MODES mode fields */
#define QUP_IO_M_MODE_FIFO 0
#define QUP_IO_M_MODE_BLOCK 1
#define QUP_IO_M_MODE_DMOV 2
#define QUP_IO_M_MODE_BAM 3

/* QUP_OPERATIONAL fields */
#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
#define QUP_OP_IN_SERVICE_FLAG BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
#define QUP_OP_IN_FIFO_FULL BIT(7)
#define QUP_OP_OUT_FIFO_FULL BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE BIT(10)
#define SPI_CONFIG_INPUT_FIRST BIT(9)
#define SPI_CONFIG_LOOPBACK BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
#define SPI_IO_C_MX_CS_MODE BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK 0x000c
#define SPI_IO_C_TRISTATE_CS BIT(1)
#define SPI_IO_C_NO_TRI_STATE BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN BIT(0)

#define SPI_NUM_CHIPSELECTS 4

/* largest single transfer the hardware counters can describe */
#define SPI_MAX_XFER (SZ_64K - 64)

/* high speed mode is when bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE 26000000
#define SPI_MAX_RATE 50000000

/* poll interval (us) and retry count used while waiting for a valid state */
#define SPI_DELAY_THRESHOLD 1
#define SPI_DELAY_RETRY 10
0124
/*
 * Per-controller runtime state for one QUP SPI instance.
 */
struct spi_qup {
	void __iomem *base;	/* mapped QUP register block */
	struct device *dev;
	struct clk *cclk;	/* "core" clock (SPI bit clock source) */
	struct clk *iclk;	/* "iface" (AHB interface) clock */
	int irq;
	spinlock_t lock;	/* guards 'error' and transfer bookkeeping */

	/* FIFO/block geometry read from QUP_IO_M_MODES at probe, in bytes */
	int in_fifo_sz;
	int out_fifo_sz;
	int in_blk_sz;
	int out_blk_sz;

	struct spi_transfer *xfer;	/* transfer currently in flight */
	struct completion done;		/* signalled by IRQ/DMA completion */
	int error;			/* first error seen during the transfer */
	int w_size;			/* bytes per word (1, 2 or 4) */
	int n_words;			/* words in the current (sub-)transfer */
	int tx_bytes;			/* PIO progress counters */
	int rx_bytes;
	const u8 *tx_buf;		/* PIO chunk buffers (may be NULL) */
	u8 *rx_buf;
	int qup_v1;			/* nonzero on QUP v1 hardware */

	int mode;			/* QUP_IO_M_MODE_* for this transfer */
	struct dma_slave_config rx_conf;
	struct dma_slave_config tx_conf;
};
0153
0154 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
0155
0156 static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
0157 {
0158 u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
0159
0160 return (opflag & flag) != 0;
0161 }
0162
0163 static inline bool spi_qup_is_dma_xfer(int mode)
0164 {
0165 if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
0166 return true;
0167
0168 return false;
0169 }
0170
0171
0172 static inline unsigned int spi_qup_len(struct spi_qup *controller)
0173 {
0174 return controller->n_words * controller->w_size;
0175 }
0176
0177 static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
0178 {
0179 u32 opstate = readl_relaxed(controller->base + QUP_STATE);
0180
0181 return opstate & QUP_STATE_VALID;
0182 }
0183
0184 static int spi_qup_set_state(struct spi_qup *controller, u32 state)
0185 {
0186 unsigned long loop;
0187 u32 cur_state;
0188
0189 loop = 0;
0190 while (!spi_qup_is_valid_state(controller)) {
0191
0192 usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
0193
0194 if (++loop > SPI_DELAY_RETRY)
0195 return -EIO;
0196 }
0197
0198 if (loop)
0199 dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
0200 loop, state);
0201
0202 cur_state = readl_relaxed(controller->base + QUP_STATE);
0203
0204
0205
0206
0207 if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
0208 (state == QUP_STATE_RESET)) {
0209 writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
0210 writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
0211 } else {
0212 cur_state &= ~QUP_STATE_MASK;
0213 cur_state |= state;
0214 writel_relaxed(cur_state, controller->base + QUP_STATE);
0215 }
0216
0217 loop = 0;
0218 while (!spi_qup_is_valid_state(controller)) {
0219
0220 usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
0221
0222 if (++loop > SPI_DELAY_RETRY)
0223 return -EIO;
0224 }
0225
0226 return 0;
0227 }
0228
0229 static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
0230 {
0231 u8 *rx_buf = controller->rx_buf;
0232 int i, shift, num_bytes;
0233 u32 word;
0234
0235 for (; num_words; num_words--) {
0236
0237 word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
0238
0239 num_bytes = min_t(int, spi_qup_len(controller) -
0240 controller->rx_bytes,
0241 controller->w_size);
0242
0243 if (!rx_buf) {
0244 controller->rx_bytes += num_bytes;
0245 continue;
0246 }
0247
0248 for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
0249
0250
0251
0252
0253
0254
0255 shift = BITS_PER_BYTE;
0256 shift *= (controller->w_size - i - 1);
0257 rx_buf[controller->rx_bytes] = word >> shift;
0258 }
0259 }
0260 }
0261
/*
 * Drain the input FIFO into the current transfer's RX buffer.
 *
 * Called from the IRQ handler when QUP_OP_IN_SERVICE_FLAG is raised.
 * In block mode up to one block's worth of words is read per service
 * request; in FIFO mode words are popped while the FIFO is non-empty.
 * On completion '*opflags' is refreshed so the caller sees the current
 * QUP_OPERATIONAL value.
 */
static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
	u32 remainder, words_per_block, num_words;
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;

	/* words still outstanding for this transfer */
	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
				 controller->w_size);
	words_per_block = controller->in_blk_sz >> 2;

	do {
		/* ack the input service request before reading */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (!remainder)
			goto exit;

		if (is_block_mode) {
			/* read one block (or the final partial block) */
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			/* FIFO mode: one word at a time while data is present */
			if (!spi_qup_is_flag_set(controller,
						 QUP_OP_IN_FIFO_NOT_EMPTY))
				break;

			num_words = 1;
		}

		spi_qup_read_from_fifo(controller, num_words);

		remainder -= num_words;

		/* in block mode, stop once no further block read is requested */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_IN_BLOCK_READ_REQ))
			break;

	} while (remainder);

	/*
	 * Once everything has been read, re-read QUP_OPERATIONAL for the
	 * caller and, in block mode with MAX_INPUT_DONE set, ack the
	 * input service flag once more so a trailing service request is
	 * not left pending.
	 */
exit:
	if (!remainder) {
		*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
			writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
				       controller->base + QUP_OPERATIONAL);
	}
}
0316
0317 static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
0318 {
0319 const u8 *tx_buf = controller->tx_buf;
0320 int i, num_bytes;
0321 u32 word, data;
0322
0323 for (; num_words; num_words--) {
0324 word = 0;
0325
0326 num_bytes = min_t(int, spi_qup_len(controller) -
0327 controller->tx_bytes,
0328 controller->w_size);
0329 if (tx_buf)
0330 for (i = 0; i < num_bytes; i++) {
0331 data = tx_buf[controller->tx_bytes + i];
0332 word |= data << (BITS_PER_BYTE * (3 - i));
0333 }
0334
0335 controller->tx_bytes += num_bytes;
0336
0337 writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
0338 }
0339 }
0340
0341 static void spi_qup_dma_done(void *data)
0342 {
0343 struct spi_qup *qup = data;
0344
0345 complete(&qup->done);
0346 }
0347
/*
 * Fill the output FIFO from the current transfer's TX buffer.
 *
 * Called from the IRQ handler when QUP_OP_OUT_SERVICE_FLAG is raised
 * (and once up-front in FIFO mode from spi_qup_do_pio).  In block mode
 * up to one block is written per service request; in FIFO mode words
 * are pushed until the FIFO reports full.
 */
static void spi_qup_write(struct spi_qup *controller)
{
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
	u32 remainder, words_per_block, num_words;

	/* words still outstanding for this transfer */
	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
				 controller->w_size);
	words_per_block = controller->out_blk_sz >> 2;

	do {
		/* ack the output service request before writing */
		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (!remainder)
			return;

		if (is_block_mode) {
			/* write one block (or the final partial block) */
			num_words = (remainder > words_per_block) ?
				words_per_block : remainder;
		} else {
			/* FIFO mode: one word at a time until the FIFO fills */
			if (spi_qup_is_flag_set(controller,
						QUP_OP_OUT_FIFO_FULL))
				break;

			num_words = 1;
		}

		spi_qup_write_to_fifo(controller, num_words);

		remainder -= num_words;

		/* in block mode, stop once no further block write is requested */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_OUT_BLOCK_WRITE_REQ))
			break;

	} while (remainder);
}
0388
0389 static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
0390 unsigned int nents, enum dma_transfer_direction dir,
0391 dma_async_tx_callback callback)
0392 {
0393 struct spi_qup *qup = spi_master_get_devdata(master);
0394 unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
0395 struct dma_async_tx_descriptor *desc;
0396 struct dma_chan *chan;
0397 dma_cookie_t cookie;
0398
0399 if (dir == DMA_MEM_TO_DEV)
0400 chan = master->dma_tx;
0401 else
0402 chan = master->dma_rx;
0403
0404 desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
0405 if (IS_ERR_OR_NULL(desc))
0406 return desc ? PTR_ERR(desc) : -EINVAL;
0407
0408 desc->callback = callback;
0409 desc->callback_param = qup;
0410
0411 cookie = dmaengine_submit(desc);
0412
0413 return dma_submit_error(cookie);
0414 }
0415
0416 static void spi_qup_dma_terminate(struct spi_master *master,
0417 struct spi_transfer *xfer)
0418 {
0419 if (xfer->tx_buf)
0420 dmaengine_terminate_all(master->dma_tx);
0421 if (xfer->rx_buf)
0422 dmaengine_terminate_all(master->dma_rx);
0423 }
0424
0425 static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
0426 u32 *nents)
0427 {
0428 struct scatterlist *sg;
0429 u32 total = 0;
0430
0431 for (sg = sgl; sg; sg = sg_next(sg)) {
0432 unsigned int len = sg_dma_len(sg);
0433
0434
0435 if (((total + len) < total) || ((total + len) > max))
0436 break;
0437
0438 total += len;
0439 (*nents)++;
0440 }
0441
0442 return total;
0443 }
0444
/*
 * Execute one spi_transfer using DMA, splitting its scatterlists into
 * chunks of at most SPI_MAX_XFER bytes.  For each chunk the controller
 * is reconfigured, descriptors are submitted, and the routine waits on
 * qup->done (signalled from the DMA callback) up to 'timeout' jiffies.
 *
 * Returns 0 on success, -ETIMEDOUT on completion timeout, or another
 * negative errno on configuration/submission failure.
 */
static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	struct spi_master *master = spi->master;
	struct spi_qup *qup = spi_master_get_devdata(master);
	struct scatterlist *tx_sgl, *rx_sgl;
	int ret;

	/*
	 * Register a completion callback on only one direction (RX when
	 * present, otherwise TX): a single completion wakes the waiter.
	 */
	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	rx_sgl = xfer->rx_sg.sgl;
	tx_sgl = xfer->tx_sg.sgl;

	do {
		u32 rx_nents = 0, tx_nents = 0;

		/* size this chunk: as many sg entries as fit in SPI_MAX_XFER */
		if (rx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
				SPI_MAX_XFER, &rx_nents) / qup->w_size;
		if (tx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
				SPI_MAX_XFER, &tx_nents) / qup->w_size;
		if (!qup->n_words)
			return -EIO;

		/* program counts/modes for this chunk */
		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		/* allow the hardware to move data */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}
		if (rx_sgl) {
			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
					      DMA_DEV_TO_MEM, rx_done);
			if (ret)
				return ret;
			dma_async_issue_pending(master->dma_rx);
		}

		if (tx_sgl) {
			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
					      DMA_MEM_TO_DEV, tx_done);
			if (ret)
				return ret;

			dma_async_issue_pending(master->dma_tx);
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		/* advance past the sg entries consumed by this chunk */
		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
			;
		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
			;

	} while (rx_sgl || tx_sgl);

	return 0;
}
0513
/*
 * Execute one spi_transfer in PIO (FIFO or block) mode, splitting it
 * into chunks of at most SPI_MAX_XFER words.  Each chunk reconfigures
 * the controller, primes the FIFO (FIFO mode only), then waits on
 * qup->done which the IRQ handler completes.
 *
 * Returns 0 on success, -ETIMEDOUT on completion timeout, or another
 * negative errno on state/configuration failure.
 */
static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	struct spi_master *master = spi->master;
	struct spi_qup *qup = spi_master_get_devdata(master);
	int ret, n_words, iterations, offset = 0;

	n_words = qup->n_words;
	/* number of full SPI_MAX_XFER-word chunks before the remainder */
	iterations = n_words / SPI_MAX_XFER;
	qup->rx_buf = xfer->rx_buf;
	qup->tx_buf = xfer->tx_buf;

	do {
		if (iterations)
			qup->n_words = SPI_MAX_XFER;
		else
			qup->n_words = n_words % SPI_MAX_XFER;
		/*
		 * NOTE(review): when n_words is an exact multiple of
		 * SPI_MAX_XFER the final pass programs 0 words — confirm
		 * this case cannot occur or is benign.
		 */

		if (qup->tx_buf && offset)
			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;

		if (qup->rx_buf && offset)
			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;
		/*
		 * NOTE(review): the buffer advance is offset * SPI_MAX_XFER
		 * bytes while n_words counts w_size-sized words — this only
		 * lines up when w_size == 1; verify for wider words.
		 */

		/*
		 * If the remaining chunk is small enough, fall back to
		 * FIFO mode for it.
		 */
		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
			qup->mode = QUP_IO_M_MODE_FIFO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		/* pause so the FIFO can be primed before the clock runs */
		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
		if (ret) {
			dev_warn(qup->dev, "cannot set PAUSE state\n");
			return ret;
		}

		if (qup->mode == QUP_IO_M_MODE_FIFO)
			spi_qup_write(qup);

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		offset++;
	} while (iterations--);

	return 0;
}
0578
0579 static bool spi_qup_data_pending(struct spi_qup *controller)
0580 {
0581 unsigned int remainder_tx, remainder_rx;
0582
0583 remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
0584 controller->tx_bytes, controller->w_size);
0585
0586 remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
0587 controller->rx_bytes, controller->w_size);
0588
0589 return remainder_tx || remainder_rx;
0590 }
0591
/*
 * Interrupt handler: latches and clears error flags, services the
 * input/output FIFOs for PIO transfers, and completes qup->done when
 * the transfer has finished (or an error occurred).
 */
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	u32 opflags, qup_err, spi_err;
	int error = 0;

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	/* ack the error flags by writing the read values back */
	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	/* record only the first error of the transfer */
	spin_lock(&controller->lock);
	if (!controller->error)
		controller->error = error;
	spin_unlock(&controller->lock);

	if (spi_qup_is_dma_xfer(controller->mode)) {
		/* DMA moves the data; just ack the operational flags */
		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
	} else {
		/* PIO: service whichever FIFO raised the interrupt */
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_read(controller, &opflags);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_write(controller);

		if (!spi_qup_data_pending(controller))
			complete(&controller->done);
	}

	if (error)
		complete(&controller->done);

	if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
		/* for PIO, only complete once all words have been moved */
		if (!spi_qup_is_dma_xfer(controller->mode)) {
			if (spi_qup_data_pending(controller))
				return IRQ_HANDLED;
		}
		complete(&controller->done);
	}

	return IRQ_HANDLED;
}
0658
0659
/*
 * Per-transfer preparation done once before any chunking: validate the
 * loopback size, set the core clock rate, compute word size / count,
 * and pick the transfer mode (FIFO, BAM/DMA or BLOCK).
 */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	int ret;

	/* loopback transfers must fit entirely in the input FIFO */
	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "too big size for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "fail to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
	controller->n_words = xfer->len / controller->w_size;

	/* small transfers use FIFO mode; DMA when mapped, else BLOCK PIO */
	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
		controller->mode = QUP_IO_M_MODE_FIFO;
	else if (spi->master->can_dma &&
		 spi->master->can_dma(spi->master, spi, xfer) &&
		 spi->master->cur_msg_mapped)
		controller->mode = QUP_IO_M_MODE_BAM;
	else
		controller->mode = QUP_IO_M_MODE_BLOCK;

	return 0;
}
0692
0693
/*
 * Program the controller for one transfer (or one chunk of a larger
 * transfer): reset bookkeeping and the state machine, set the per-mode
 * word-count registers, the I/O mode, clock polarity/phase, loopback,
 * HS mode, word size and interrupt masking.
 *
 * Returns 0 on success or -EIO on an invalid state or unknown mode.
 */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, control;
	unsigned long flags;

	/* reset transfer bookkeeping shared with the IRQ handler */
	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	switch (controller->mode) {
	case QUP_IO_M_MODE_FIFO:
		/* FIFO mode uses the READ/WRITE counters ... */
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_READ_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_WRITE_CNT);
		/* ... and zeroes the INPUT/OUTPUT counters */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		break;
	case QUP_IO_M_MODE_BAM:
		/* BAM/DMA uses the INPUT/OUTPUT counters ... */
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* ... and zeroes the READ/WRITE counters */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * On v2, both counters are zeroed for DMA except
			 * the one unbalanced case: an RX-only transfer
			 * (no tx_buf) keeps n_words in QUP_MX_INPUT_CNT.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(controller->n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
		break;
	case QUP_IO_M_MODE_BLOCK:
		/* block-mode PIO completes via the IRQ handler */
		reinit_completion(&controller->done);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);

		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
		break;
	default:
		dev_err(controller->dev, "unknown mode = %d\n",
			controller->mode);
		return -EIO;
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);

	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	/* packing/unpacking is only used for DMA modes */
	if (!spi_qup_is_dma_xfer(controller->mode))
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	/* clock polarity (CPOL) */
	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	/* clock phase (CPHA): INPUT_FIRST is set for mode 0/2 timing */
	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE is enabled at or above SPI_HS_MIN_RATE, but never in
	 * loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	/* QUP_CONFIG_N holds bits-per-word minus one */
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (spi_qup_is_dma_xfer(controller->mode)) {
		/* disable the unused direction for half-duplex DMA */
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * On v2, mask the input/output service interrupts during
		 * DMA transfers — the DMA callbacks signal completion, so
		 * the per-block service IRQs are not needed.
		 */
		if (spi_qup_is_dma_xfer(controller->mode))
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}
0843
/*
 * ->transfer_one callback: prepare the controller for 'xfer', compute a
 * generous timeout, then run the transfer via DMA or PIO.  Always moves
 * the state machine back to RESET afterwards, and terminates DMA on
 * failure.
 */
static int spi_qup_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_prep(spi, xfer);
	if (ret)
		return ret;

	/* clocks per millisecond */
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	/* milliseconds to shift one chunk at 8 clocks per byte */
	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
				     xfer->len) * 8, timeout);
	/* 100x safety margin, converted to jiffies */
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	/* reset transfer bookkeeping shared with the IRQ handler */
	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_is_dma_xfer(controller->mode))
		ret = spi_qup_do_dma(spi, xfer, timeout);
	else
		ret = spi_qup_do_pio(spi, xfer, timeout);

	spi_qup_set_state(controller, QUP_STATE_RESET);
	/* an IRQ-reported error takes precedence over a clean return */
	spin_lock_irqsave(&controller->lock, flags);
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && spi_qup_is_dma_xfer(controller->mode))
		spi_qup_dma_terminate(master, xfer);

	return ret;
}
0886
/*
 * ->can_dma callback: a transfer is DMA-capable when its buffers are
 * cache-aligned, the corresponding DMA channel was acquired, the length
 * is block-aligned on v1 hardware, and the transfer is too large for
 * FIFO mode.
 */
static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	size_t dma_align = dma_get_cache_alignment();
	int n_words;

	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_rx))
			return false;
		/* v1 requires block-size-aligned lengths */
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}

	if (xfer->tx_buf) {
		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_tx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
			return false;
	}

	/* small transfers go through the FIFO instead */
	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
		return false;

	return true;
}
0916
0917 static void spi_qup_release_dma(struct spi_master *master)
0918 {
0919 if (!IS_ERR_OR_NULL(master->dma_rx))
0920 dma_release_channel(master->dma_rx);
0921 if (!IS_ERR_OR_NULL(master->dma_tx))
0922 dma_release_channel(master->dma_tx);
0923 }
0924
/*
 * Request and configure the RX and TX DMA channels.  'base' is the
 * physical address of the register block, used to point the channels at
 * the QUP data FIFOs.  Returns 0 on success; on failure every channel
 * acquired so far is released again.
 */
static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
{
	struct spi_qup *spi = spi_master_get_devdata(master);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	master->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(master->dma_rx))
		return PTR_ERR(master->dma_rx);

	master->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters: device flow control, FIFO addresses, bursts */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	/* slave config failed: both channels were acquired, release both */
	dma_release_channel(master->dma_tx);
err_tx:
	/* TX request failed (or fell through): release the RX channel */
	dma_release_channel(master->dma_rx);
	return ret;
}
0975
0976 static void spi_qup_set_cs(struct spi_device *spi, bool val)
0977 {
0978 struct spi_qup *controller;
0979 u32 spi_ioc;
0980 u32 spi_ioc_orig;
0981
0982 controller = spi_master_get_devdata(spi->master);
0983 spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
0984 spi_ioc_orig = spi_ioc;
0985 if (!val)
0986 spi_ioc |= SPI_IO_C_FORCE_CS;
0987 else
0988 spi_ioc &= ~SPI_IO_C_FORCE_CS;
0989
0990 if (spi_ioc != spi_ioc_orig)
0991 writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
0992 }
0993
/*
 * Probe one QUP SPI controller: map registers, acquire clocks and the
 * IRQ, read the FIFO/block geometry from the hardware, reset the core,
 * and register the SPI master.  DMA channels are optional — the driver
 * falls back to PIO when they are unavailable (except -EPROBE_DEFER).
 */
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* "spi-max-frequency" is optional; default to the hardware max */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use "num-cs" from DT when valid, else the hardware maximum */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->dma_alignment = dma_get_cache_alignment();
	master->max_dma_len = SPI_MAX_XFER;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	/* DMA is optional: only deferral aborts the probe */
	ret = spi_qup_init_dma(master, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		master->can_dma = spi_qup_can_dma;

	/* non-NULL match data marks QUP v1 hardware */
	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);

	if (!controller->qup_v1)
		master->set_cs = spi_qup_set_cs;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	/* FIFO and block geometry is encoded in QUP_IO_M_MODES */
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	/* soft-reset the core, then put it in a known RESET state */
	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_dma;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* v1 needs the QUP error interrupts enabled explicitly */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_dma;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_dma:
	spi_qup_release_dma(master);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}
1170
1171 #ifdef CONFIG_PM
1172 static int spi_qup_pm_suspend_runtime(struct device *device)
1173 {
1174 struct spi_master *master = dev_get_drvdata(device);
1175 struct spi_qup *controller = spi_master_get_devdata(master);
1176 u32 config;
1177
1178
1179 config = readl(controller->base + QUP_CONFIG);
1180 config |= QUP_CONFIG_CLOCK_AUTO_GATE;
1181 writel_relaxed(config, controller->base + QUP_CONFIG);
1182
1183 clk_disable_unprepare(controller->cclk);
1184 clk_disable_unprepare(controller->iclk);
1185
1186 return 0;
1187 }
1188
1189 static int spi_qup_pm_resume_runtime(struct device *device)
1190 {
1191 struct spi_master *master = dev_get_drvdata(device);
1192 struct spi_qup *controller = spi_master_get_devdata(master);
1193 u32 config;
1194 int ret;
1195
1196 ret = clk_prepare_enable(controller->iclk);
1197 if (ret)
1198 return ret;
1199
1200 ret = clk_prepare_enable(controller->cclk);
1201 if (ret)
1202 return ret;
1203
1204
1205 config = readl_relaxed(controller->base + QUP_CONFIG);
1206 config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
1207 writel_relaxed(config, controller->base + QUP_CONFIG);
1208 return 0;
1209 }
1210 #endif
1211
1212 #ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: quiesce the SPI core, park the QUP state machine,
 * and gate the controller clocks.
 */
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	/*
	 * If runtime PM already powered the block down, bring it back up
	 * first so the state-machine write below reaches live hardware.
	 */
	if (pm_runtime_suspended(device)) {
		ret = spi_qup_pm_resume_runtime(device);
		if (ret)
			return ret;
	}
	/* Stop the SPI core from queueing further transfers */
	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	/* Put the QUP into RESET before cutting its clocks */
	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);
	return 0;
}
1236
1237 static int spi_qup_resume(struct device *device)
1238 {
1239 struct spi_master *master = dev_get_drvdata(device);
1240 struct spi_qup *controller = spi_master_get_devdata(master);
1241 int ret;
1242
1243 ret = clk_prepare_enable(controller->iclk);
1244 if (ret)
1245 return ret;
1246
1247 ret = clk_prepare_enable(controller->cclk);
1248 if (ret)
1249 return ret;
1250
1251 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1252 if (ret)
1253 return ret;
1254
1255 return spi_master_resume(master);
1256 }
1257 #endif
1258
1259 static int spi_qup_remove(struct platform_device *pdev)
1260 {
1261 struct spi_master *master = dev_get_drvdata(&pdev->dev);
1262 struct spi_qup *controller = spi_master_get_devdata(master);
1263 int ret;
1264
1265 ret = pm_runtime_resume_and_get(&pdev->dev);
1266 if (ret < 0)
1267 return ret;
1268
1269 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1270 if (ret)
1271 return ret;
1272
1273 spi_qup_release_dma(master);
1274
1275 clk_disable_unprepare(controller->cclk);
1276 clk_disable_unprepare(controller->iclk);
1277
1278 pm_runtime_put_noidle(&pdev->dev);
1279 pm_runtime_disable(&pdev->dev);
1280
1281 return 0;
1282 }
1283
/*
 * Device-tree match table. A non-NULL .data (1) flags v1.1.1 hardware;
 * the probe path stores it and the driver branches on qup_v1 for the
 * v1-specific register programming.
 */
static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
1291
/* PM callbacks: system sleep (suspend/resume) plus runtime PM, no idle hook */
static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};
1298
/* Platform driver glue: binds via the DT table above and registers on load */
static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");