0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/kernel.h>
0010 #include <linux/init.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/module.h>
0013 #include <linux/device.h>
0014 #include <linux/delay.h>
0015 #include <linux/dma-mapping.h>
0016 #include <linux/dmaengine.h>
0017 #include <linux/omap-dma.h>
0018 #include <linux/platform_device.h>
0019 #include <linux/err.h>
0020 #include <linux/clk.h>
0021 #include <linux/io.h>
0022 #include <linux/slab.h>
0023 #include <linux/pm_runtime.h>
0024 #include <linux/of.h>
0025 #include <linux/of_device.h>
0026 #include <linux/pinctrl/consumer.h>
0027 #include <linux/mfd/syscon.h>
0028 #include <linux/regmap.h>
0029 #include <linux/sizes.h>
0030
0031 #include <linux/spi/spi.h>
0032 #include <linux/spi/spi-mem.h>
0033
/* Controller register state cached by the driver, restored on runtime resume. */
struct ti_qspi_regs {
	u32 clkctrl;	/* last value written to QSPI_SPI_CLOCK_CNTRL_REG */
};
0037
/* Per-controller driver state. */
struct ti_qspi {
	/* Completed from the DMA callback when a memcpy transfer finishes. */
	struct completion transfer_complete;

	/* Serialises PIO message transfers against memory-mapped reads. */
	struct mutex list_lock;

	struct spi_master *master;
	void __iomem *base;		/* QSPI control registers */
	void __iomem *mmap_base;	/* ioremapped flash window (PIO fallback) */
	size_t mmap_size;		/* size of the memory-mapped flash window */
	struct regmap *ctrl_base;	/* optional syscon for CS routing; may be NULL */
	unsigned int ctrl_reg;		/* register offset within ctrl_base */
	struct clk *fclk;		/* functional clock, source for SCLK divider */
	struct device *dev;

	struct ti_qspi_regs ctx_reg;	/* register context for runtime PM restore */

	dma_addr_t mmap_phys_base;	/* physical base of the mmap window */
	dma_addr_t rx_bb_dma_addr;	/* DMA address of the RX bounce buffer */
	void *rx_bb_addr;		/* CPU address of the RX bounce buffer */
	struct dma_chan *rx_chan;	/* memcpy DMA channel; NULL in PIO mode */

	u32 cmd;	/* command register template for the current message */
	u32 dc;		/* device control (clock/CS polarity) value */

	bool mmap_enabled;	/* true while the controller is in mmap mode */
	int current_cs;		/* chip select routed to mmap mode, -1 if none */
};
0066
/* QSPI register offsets */
#define QSPI_PID			(0x0)
#define QSPI_SYSCONFIG			(0x10)
#define QSPI_SPI_CLOCK_CNTRL_REG	(0x40)
#define QSPI_SPI_DC_REG			(0x44)
#define QSPI_SPI_CMD_REG		(0x48)
#define QSPI_SPI_STATUS_REG		(0x4c)
#define QSPI_SPI_DATA_REG		(0x50)
/* One setup register per chip select; arg parenthesized for macro hygiene. */
#define QSPI_SPI_SETUP_REG(n)		((0x54 + 4 * (n)))
#define QSPI_SPI_SWITCH_REG		(0x64)
#define QSPI_SPI_DATA_REG_1		(0x68)
#define QSPI_SPI_DATA_REG_2		(0x6c)
#define QSPI_SPI_DATA_REG_3		(0x70)

#define QSPI_COMPLETION_TIMEOUT		msecs_to_jiffies(2000)

/* Clock Control register fields */
#define QSPI_CLK_EN			(1 << 31)
#define QSPI_CLK_DIV_MAX		0xffff

/* Command register fields (all macro arguments parenthesized) */
#define QSPI_EN_CS(n)			((n) << 28)
#define QSPI_WLEN(n)			(((n) - 1) << 19)	/* n = word length in bits */
#define QSPI_3_PIN			(1 << 18)
#define QSPI_RD_SNGL			(1 << 16)
#define QSPI_WR_SNGL			(2 << 16)
#define QSPI_RD_DUAL			(3 << 16)
#define QSPI_RD_QUAD			(7 << 16)
#define QSPI_INVAL			(4 << 16)
#define QSPI_FLEN(n)			(((n) - 1) << 0)	/* n = frame length in words */
#define QSPI_WLEN_MAX_BITS		128
#define QSPI_WLEN_MAX_BYTES		16
#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)

/* Status register bits */
#define BUSY				0x01
#define WC				0x02	/* word complete */

/* Device Control register: per-chip-select clock/CS polarity fields */
#define QSPI_DD(m, n)			((m) << (3 + (n) * 8))
#define QSPI_CKPHA(n)			(1 << (2 + (n) * 8))
#define QSPI_CSPOL(n)			(1 << (1 + (n) * 8))
#define QSPI_CKPOL(n)			(1 << ((n) * 8))

#define QSPI_FRAME			4096	/* max words per frame */

#define QSPI_AUTOSUSPEND_TIMEOUT	2000	/* ms */

/* Syscon chip-select routing for memory-mapped mode */
#define MEM_CS_EN(n)			(((n) + 1) << 8)
#define MEM_CS_MASK			(7 << 8)

#define MM_SWITCH			0x1	/* enable memory-mapped mode */

/* Setup register: read mode, address and dummy byte counts */
#define QSPI_SETUP_RD_NORMAL		(0x0 << 12)
#define QSPI_SETUP_RD_DUAL		(0x1 << 12)
#define QSPI_SETUP_RD_QUAD		(0x3 << 12)
#define QSPI_SETUP_ADDR_SHIFT		8
#define QSPI_SETUP_DUMMY_SHIFT		10

#define QSPI_DMA_BUFFER_SIZE		SZ_64K
0126
/* Read a 32-bit QSPI controller register at offset 'reg'. */
static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
					 unsigned long reg)
{
	return readl(qspi->base + reg);
}
0132
/* Write 'val' to the 32-bit QSPI controller register at offset 'reg'. */
static inline void ti_qspi_write(struct ti_qspi *qspi,
				 unsigned long val, unsigned long reg)
{
	writel(val, qspi->base + reg);
}
0138
/*
 * ti_qspi_setup() - SPI core per-device setup callback.
 *
 * Validates that a controller maximum bus frequency was configured,
 * clamps the device's max_speed_hz to it, and cycles a runtime PM
 * reference so the controller is powered and marked recently busy.
 *
 * Returns 0 on success or a negative errno.
 */
static int ti_qspi_setup(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
	int ret;

	/* Refuse to reconfigure while a message is in flight. */
	if (spi->master->busy) {
		dev_dbg(qspi->dev, "master busy doing other transfers\n");
		return -EBUSY;
	}

	/* Without a max frequency the clock divider cannot be derived. */
	if (!qspi->master->max_speed_hz) {
		dev_err(qspi->dev, "spi max frequency not defined\n");
		return -EINVAL;
	}

	spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz);

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	/* Drop the reference via autosuspend so the device can idle. */
	pm_runtime_mark_last_busy(qspi->dev);
	ret = pm_runtime_put_autosuspend(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
		return ret;
	}

	return 0;
}
0171
0172 static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz)
0173 {
0174 struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
0175 int clk_div;
0176 u32 clk_ctrl_reg, clk_rate, clk_ctrl_new;
0177
0178 clk_rate = clk_get_rate(qspi->fclk);
0179 clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1;
0180 clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX);
0181 dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div);
0182
0183 pm_runtime_resume_and_get(qspi->dev);
0184
0185 clk_ctrl_new = QSPI_CLK_EN | clk_div;
0186 if (ctx_reg->clkctrl != clk_ctrl_new) {
0187 clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
0188
0189 clk_ctrl_reg &= ~QSPI_CLK_EN;
0190
0191
0192 ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
0193
0194
0195 ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG);
0196 ctx_reg->clkctrl = clk_ctrl_new;
0197 }
0198
0199 pm_runtime_mark_last_busy(qspi->dev);
0200 pm_runtime_put_autosuspend(qspi->dev);
0201 }
0202
/* Restore the cached clock control register after a runtime resume. */
static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
{
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;

	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
}
0209
/*
 * qspi_is_busy() - wait for the controller BUSY bit to clear.
 *
 * Polls the status register until BUSY drops or the 2 s completion
 * timeout expires. Returns 0 when idle, or the non-zero BUSY bit (and
 * emits a WARN) if the controller is still busy after the timeout.
 */
static inline u32 qspi_is_busy(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	while ((stat & BUSY) && time_after(timeout, jiffies)) {
		cpu_relax();
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	}

	WARN(stat & BUSY, "qspi busy\n");
	return stat & BUSY;
}
0224
0225 static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
0226 {
0227 u32 stat;
0228 unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
0229
0230 do {
0231 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
0232 if (stat & WC)
0233 return 0;
0234 cpu_relax();
0235 } while (time_after(timeout, jiffies));
0236
0237 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
0238 if (stat & WC)
0239 return 0;
0240 return -ETIMEDOUT;
0241 }
0242
/*
 * qspi_write_msg() - transmit 'count' bytes from t->tx_buf using PIO.
 *
 * Each loop iteration loads the data register(s) and issues one
 * single-write command. For 8-bit words, runs of at least 16 bytes are
 * packed into the four 32-bit data registers and sent as one 128-bit
 * word, cutting down on register writes and WC polls.
 *
 * Returns 0 on success, -EBUSY if the controller never goes idle, or
 * -ETIMEDOUT if word completion does not arrive.
 */
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			  int count)
{
	int wlen, xfer_len;
	unsigned int cmd;
	const u8 *txbuf;
	u32 data;

	txbuf = t->tx_buf;
	cmd = qspi->cmd | QSPI_WR_SNGL;
	wlen = t->bits_per_word >> 3;	/* word length in bytes */
	xfer_len = wlen;

	while (count) {
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
				cmd, qspi->dc, *txbuf);
			if (count >= QSPI_WLEN_MAX_BYTES) {
				/*
				 * Pack 16 bytes big-endian into
				 * DATA_3..DATA and send them as a single
				 * 128-bit word; DATA_3 holds the bytes
				 * shifted out first.
				 */
				u32 *txp = (u32 *)txbuf;

				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_3);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_2);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_1);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG);
				xfer_len = QSPI_WLEN_MAX_BYTES;
				cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
			} else {
				/* Tail bytes are sent one at a time. */
				writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
				cmd = qspi->cmd | QSPI_WR_SNGL;
				xfer_len = wlen;
				cmd |= QSPI_WLEN(wlen);
			}
			break;
		case 2:
			/*
			 * NOTE(review): the debug print dereferences txbuf
			 * as u8, so only the first byte of the 16-bit word
			 * is shown.
			 */
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
				cmd, qspi->dc, *txbuf);
			writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			/* NOTE(review): as above, only the first byte is printed. */
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
				cmd, qspi->dc, *txbuf);
			writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		}

		/* Trigger the transfer and wait for word completion. */
		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "write timed out\n");
			return -ETIMEDOUT;
		}
		txbuf += xfer_len;
		count -= xfer_len;
	}

	return 0;
}
0311
/*
 * qspi_read_msg() - receive 'count' bytes into t->rx_buf using PIO.
 *
 * Issues a read command per word and pulls the result from the data
 * register(s). For 8-bit words the transfer is batched: 16 bytes per
 * command using all four data registers while enough data remains, and
 * up to 4 tail bytes packed into one word at the end.
 *
 * Returns 0 on success, -EBUSY if the controller never goes idle, or
 * -ETIMEDOUT if word completion does not arrive.
 */
static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			 int count)
{
	int wlen;
	unsigned int cmd;
	u32 rx;
	u8 rxlen, rx_wlen;
	u8 *rxbuf;

	rxbuf = t->rx_buf;
	cmd = qspi->cmd;
	/* Select single/dual/quad read mode from the transfer's rx_nbits. */
	switch (t->rx_nbits) {
	case SPI_NBITS_DUAL:
		cmd |= QSPI_RD_DUAL;
		break;
	case SPI_NBITS_QUAD:
		cmd |= QSPI_RD_QUAD;
		break;
	default:
		cmd |= QSPI_RD_SNGL;
		break;
	}
	wlen = t->bits_per_word >> 3;	/* word length in bytes */
	rx_wlen = wlen;

	while (count) {
		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			/*
			 * Optimise 8-bit transfers: the controller can
			 * burst up to 128 bits, so read 16 bytes per
			 * command while possible, otherwise pack up to 4
			 * tail bytes into a single word.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				rxlen = QSPI_WLEN_MAX_BYTES;
			} else {
				rxlen = min(count, 4);
			}
			rx_wlen = rxlen << 3;	/* word length in bits */
			cmd &= ~QSPI_WLEN_MASK;
			cmd |= QSPI_WLEN(rx_wlen);
			break;
		default:
			rxlen = wlen;
			break;
		}

		/* Kick off the read and wait for word completion. */
		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "read timed out\n");
			return -ETIMEDOUT;
		}

		switch (wlen) {
		case 1:
			/*
			 * Unpack the burst: a full 16-byte burst arrives
			 * big-endian in DATA_3..DATA; a short burst sits
			 * left-justified in DATA and is extracted byte by
			 * byte from the high end.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *rxp = (u32 *) rxbuf;
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				*rxp++ = be32_to_cpu(rx);
			} else {
				u8 *rxp = rxbuf;
				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				if (rx_wlen >= 8)
					*rxp++ = rx >> (rx_wlen - 8);
				if (rx_wlen >= 16)
					*rxp++ = rx >> (rx_wlen - 16);
				if (rx_wlen >= 24)
					*rxp++ = rx >> (rx_wlen - 24);
				if (rx_wlen >= 32)
					*rxp++ = rx;
			}
			break;
		case 2:
			*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
			break;
		}
		rxbuf += rxlen;
		count -= rxlen;
	}

	return 0;
}
0410
0411 static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
0412 int count)
0413 {
0414 int ret;
0415
0416 if (t->tx_buf) {
0417 ret = qspi_write_msg(qspi, t, count);
0418 if (ret) {
0419 dev_dbg(qspi->dev, "Error while writing\n");
0420 return ret;
0421 }
0422 }
0423
0424 if (t->rx_buf) {
0425 ret = qspi_read_msg(qspi, t, count);
0426 if (ret) {
0427 dev_dbg(qspi->dev, "Error while reading\n");
0428 return ret;
0429 }
0430 }
0431
0432 return 0;
0433 }
0434
/* dmaengine completion callback: wake the waiter in ti_qspi_dma_xfer(). */
static void ti_qspi_dma_callback(void *param)
{
	struct ti_qspi *qspi = param;

	complete(&qspi->transfer_complete);
}
0441
/*
 * ti_qspi_dma_xfer() - run one dmaengine memcpy of 'len' bytes.
 *
 * Prepares, submits and issues a memcpy descriptor on the RX channel,
 * then waits for the completion signalled by ti_qspi_dma_callback().
 * The wait timeout scales with the length (len milliseconds).
 *
 * Returns 0 on success, -EIO on prep/submit failure, or -ETIMEDOUT
 * (after terminating the channel) if the transfer never completes.
 */
static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
			    dma_addr_t dma_src, size_t len)
{
	struct dma_chan *chan = qspi->rx_chan;
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_async_tx_descriptor *tx;
	int ret;
	unsigned long time_left;

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
		return -EIO;
	}

	tx->callback = ti_qspi_dma_callback;
	tx->callback_param = qspi;
	cookie = tx->tx_submit(tx);
	/*
	 * NOTE(review): the completion is reinitialised after submit; this
	 * assumes the transfer only starts at dma_async_issue_pending()
	 * below — confirm against the dmaengine provider in use.
	 */
	reinit_completion(&qspi->transfer_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	time_left = wait_for_completion_timeout(&qspi->transfer_complete,
						msecs_to_jiffies(len));
	if (time_left == 0) {
		/* Stop the channel so the stale descriptor cannot land later. */
		dmaengine_terminate_sync(chan);
		dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
0480
0481 static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
0482 void *to, size_t readsize)
0483 {
0484 dma_addr_t dma_src = qspi->mmap_phys_base + offs;
0485 int ret = 0;
0486
0487
0488
0489
0490
0491 while (readsize != 0) {
0492 size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
0493 readsize);
0494
0495 ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
0496 dma_src, xfer_len);
0497 if (ret != 0)
0498 return ret;
0499 memcpy(to, qspi->rx_bb_addr, xfer_len);
0500 readsize -= xfer_len;
0501 dma_src += xfer_len;
0502 to += xfer_len;
0503 }
0504
0505 return ret;
0506 }
0507
/*
 * ti_qspi_dma_xfer_sg() - DMA from the mmap window at offset 'from'
 * into each segment of an already-mapped scatterlist, in order.
 *
 * Returns 0 on success or the first error from ti_qspi_dma_xfer().
 */
static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
			       loff_t from)
{
	struct scatterlist *sg;
	dma_addr_t dma_src = qspi->mmap_phys_base + from;
	dma_addr_t dma_dst;
	int i, len, ret;

	for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
		dma_dst = sg_dma_address(sg);
		len = sg_dma_len(sg);
		ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
		if (ret)
			return ret;
		/* Advance the flash-side source past the copied segment. */
		dma_src += len;
	}

	return 0;
}
0527
/*
 * ti_qspi_enable_memory_map() - switch the controller into memory-mapped
 * mode and, when a syscon is present, route the device's chip select to
 * the mmap window.
 */
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);

	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base) {
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK,
				   MEM_CS_EN(spi->chip_select));
	}
	/* Record the mode and mapped CS for exec_mem_op's fast path. */
	qspi->mmap_enabled = true;
	qspi->current_cs = spi->chip_select;
}
0541
/*
 * ti_qspi_disable_memory_map() - leave memory-mapped mode and clear any
 * syscon chip-select routing, so PIO transfers can use the controller.
 */
static void ti_qspi_disable_memory_map(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);

	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base)
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK, 0);
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;	/* no chip select mapped any more */
}
0553
0554 static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
0555 u8 data_nbits, u8 addr_width,
0556 u8 dummy_bytes)
0557 {
0558 struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
0559 u32 memval = opcode;
0560
0561 switch (data_nbits) {
0562 case SPI_NBITS_QUAD:
0563 memval |= QSPI_SETUP_RD_QUAD;
0564 break;
0565 case SPI_NBITS_DUAL:
0566 memval |= QSPI_SETUP_RD_DUAL;
0567 break;
0568 default:
0569 memval |= QSPI_SETUP_RD_NORMAL;
0570 break;
0571 }
0572 memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
0573 dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
0574 ti_qspi_write(qspi, memval,
0575 QSPI_SPI_SETUP_REG(spi->chip_select));
0576 }
0577
0578 static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
0579 {
0580 struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
0581 size_t max_len;
0582
0583 if (op->data.dir == SPI_MEM_DATA_IN) {
0584 if (op->addr.val < qspi->mmap_size) {
0585
0586 if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
0587 max_len = qspi->mmap_size - op->addr.val;
0588 op->data.nbytes = min((size_t) op->data.nbytes,
0589 max_len);
0590 }
0591 } else {
0592
0593
0594
0595
0596
0597 max_len = QSPI_FRAME;
0598 max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
0599 op->data.nbytes = min((size_t) op->data.nbytes,
0600 max_len);
0601 }
0602 }
0603
0604 return 0;
0605 }
0606
/*
 * ti_qspi_exec_mem_op() - spi-mem exec_op handler.
 *
 * Serves flash reads directly from the memory-mapped window, using the
 * memcpy DMA channel when one was obtained at probe time, or
 * memcpy_fromio() otherwise. Anything that is not a mappable read
 * (writes, no/oversized address, or a range beyond the window) returns
 * -ENOTSUPP so the core falls back to regular transfers.
 */
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
	u32 from = 0;
	int ret = 0;

	/* Only direct-mapped reads with a 1-4 byte address are handled. */
	if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
	    !op->addr.nbytes || op->addr.nbytes > 4)
		return -ENOTSUPP;

	/* The whole read must fit inside the mmap window. */
	from = op->addr.val;
	if (from + op->data.nbytes > qspi->mmap_size)
		return -ENOTSUPP;

	/* Serialise mmap reads against PIO message transfers. */
	mutex_lock(&qspi->list_lock);

	/* (Re)enter mmap mode if it is off or mapped to another CS. */
	if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) {
		ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
		ti_qspi_enable_memory_map(mem->spi);
	}
	ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
				op->addr.nbytes, op->dummy.nbytes);

	if (qspi->rx_chan) {
		struct sg_table sgt;

		/*
		 * DMA straight into the caller's buffer when it can be
		 * mapped; otherwise stage through the bounce buffer
		 * (e.g. for vmalloc'ed destinations).
		 */
		if (virt_addr_valid(op->data.buf.in) &&
		    !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
							&sgt)) {
			ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
			spi_controller_dma_unmap_mem_op_data(mem->spi->master,
							     op, &sgt);
		} else {
			ret = ti_qspi_dma_bounce_buffer(qspi, from,
							op->data.buf.in,
							op->data.nbytes);
		}
	} else {
		/* PIO fallback: copy from the ioremapped flash window. */
		memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
			      op->data.nbytes);
	}

	mutex_unlock(&qspi->list_lock);

	return ret;
}
0656
/* spi-mem callbacks: direct-mapped reads plus length clamping. */
static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
	.exec_op = ti_qspi_exec_mem_op,
	.adjust_op_size = ti_qspi_adjust_op_size,
};
0661
/*
 * ti_qspi_start_transfer_one() - transfer_one_message callback.
 *
 * Programs the device-control register from the SPI mode bits, computes
 * the frame length in words (capped at the 4096-word controller limit),
 * switches off memory-mapped mode if active, then runs each transfer of
 * the message via PIO.
 *
 * Returns 0 on success or -EINVAL if any transfer fails.
 */
static int ti_qspi_start_transfer_one(struct spi_master *master,
				      struct spi_message *m)
{
	struct ti_qspi *qspi = spi_master_get_devdata(master);
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	int status = 0, ret;
	unsigned int frame_len_words, transfer_len_words;
	int wlen;

	/* Clock phase/polarity and CS polarity for this chip select. */
	qspi->dc = 0;

	if (spi->mode & SPI_CPHA)
		qspi->dc |= QSPI_CKPHA(spi->chip_select);
	if (spi->mode & SPI_CPOL)
		qspi->dc |= QSPI_CKPOL(spi->chip_select);
	if (spi->mode & SPI_CS_HIGH)
		qspi->dc |= QSPI_CSPOL(spi->chip_select);

	/* Total message length in words, capped at the controller maximum. */
	frame_len_words = 0;
	list_for_each_entry(t, &m->transfers, transfer_list)
		frame_len_words += t->len / (t->bits_per_word >> 3);
	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);

	/* Command template: chip select and frame length. */
	qspi->cmd = 0;
	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
	qspi->cmd |= QSPI_FLEN(frame_len_words);

	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);

	mutex_lock(&qspi->list_lock);

	/* PIO transfers and mmap mode are mutually exclusive. */
	if (qspi->mmap_enabled)
		ti_qspi_disable_memory_map(spi);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		/* Refresh the word-length field for this transfer. */
		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
			     QSPI_WLEN(t->bits_per_word));

		wlen = t->bits_per_word >> 3;	/* in bytes */
		/* Never exceed the remaining frame budget. */
		transfer_len_words = min(t->len / wlen, frame_len_words);

		ti_qspi_setup_clk(qspi, t->speed_hz);
		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
		if (ret) {
			dev_dbg(qspi->dev, "transfer message failed\n");
			mutex_unlock(&qspi->list_lock);
			return -EINVAL;
		}

		m->actual_length += transfer_len_words * wlen;
		frame_len_words -= transfer_len_words;
		if (frame_len_words == 0)
			break;
	}

	mutex_unlock(&qspi->list_lock);

	/* Invalidate the command register to end the frame / release CS. */
	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
	m->status = status;
	spi_finalize_current_message(master);

	return status;
}
0728
/* Runtime PM resume: re-program the clock control register from cache. */
static int ti_qspi_runtime_resume(struct device *dev)
{
	struct ti_qspi *qspi;

	qspi = dev_get_drvdata(dev);
	ti_qspi_restore_ctx(qspi);

	return 0;
}
0738
/* Free the DMA bounce buffer and release the memcpy channel, if present. */
static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
{
	if (qspi->rx_bb_addr)
		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
				  qspi->rx_bb_addr,
				  qspi->rx_bb_dma_addr);

	if (qspi->rx_chan)
		dma_release_channel(qspi->rx_chan);
}
0749
/* Devicetree match table: DRA7xx and AM437x QSPI controllers. */
static const struct of_device_id ti_qspi_match[] = {
	{.compatible = "ti,dra7xxx-qspi" },
	{.compatible = "ti,am4372-qspi" },
	{},
};
MODULE_DEVICE_TABLE(of, ti_qspi_match);
0756
0757 static int ti_qspi_probe(struct platform_device *pdev)
0758 {
0759 struct ti_qspi *qspi;
0760 struct spi_master *master;
0761 struct resource *r, *res_mmap;
0762 struct device_node *np = pdev->dev.of_node;
0763 u32 max_freq;
0764 int ret = 0, num_cs, irq;
0765 dma_cap_mask_t mask;
0766
0767 master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
0768 if (!master)
0769 return -ENOMEM;
0770
0771 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
0772
0773 master->flags = SPI_MASTER_HALF_DUPLEX;
0774 master->setup = ti_qspi_setup;
0775 master->auto_runtime_pm = true;
0776 master->transfer_one_message = ti_qspi_start_transfer_one;
0777 master->dev.of_node = pdev->dev.of_node;
0778 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
0779 SPI_BPW_MASK(8);
0780 master->mem_ops = &ti_qspi_mem_ops;
0781
0782 if (!of_property_read_u32(np, "num-cs", &num_cs))
0783 master->num_chipselect = num_cs;
0784
0785 qspi = spi_master_get_devdata(master);
0786 qspi->master = master;
0787 qspi->dev = &pdev->dev;
0788 platform_set_drvdata(pdev, qspi);
0789
0790 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
0791 if (r == NULL) {
0792 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0793 if (r == NULL) {
0794 dev_err(&pdev->dev, "missing platform data\n");
0795 ret = -ENODEV;
0796 goto free_master;
0797 }
0798 }
0799
0800 res_mmap = platform_get_resource_byname(pdev,
0801 IORESOURCE_MEM, "qspi_mmap");
0802 if (res_mmap == NULL) {
0803 res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
0804 if (res_mmap == NULL) {
0805 dev_err(&pdev->dev,
0806 "memory mapped resource not required\n");
0807 }
0808 }
0809
0810 if (res_mmap)
0811 qspi->mmap_size = resource_size(res_mmap);
0812
0813 irq = platform_get_irq(pdev, 0);
0814 if (irq < 0) {
0815 ret = irq;
0816 goto free_master;
0817 }
0818
0819 mutex_init(&qspi->list_lock);
0820
0821 qspi->base = devm_ioremap_resource(&pdev->dev, r);
0822 if (IS_ERR(qspi->base)) {
0823 ret = PTR_ERR(qspi->base);
0824 goto free_master;
0825 }
0826
0827
0828 if (of_property_read_bool(np, "syscon-chipselects")) {
0829 qspi->ctrl_base =
0830 syscon_regmap_lookup_by_phandle(np,
0831 "syscon-chipselects");
0832 if (IS_ERR(qspi->ctrl_base)) {
0833 ret = PTR_ERR(qspi->ctrl_base);
0834 goto free_master;
0835 }
0836 ret = of_property_read_u32_index(np,
0837 "syscon-chipselects",
0838 1, &qspi->ctrl_reg);
0839 if (ret) {
0840 dev_err(&pdev->dev,
0841 "couldn't get ctrl_mod reg index\n");
0842 goto free_master;
0843 }
0844 }
0845
0846 qspi->fclk = devm_clk_get(&pdev->dev, "fck");
0847 if (IS_ERR(qspi->fclk)) {
0848 ret = PTR_ERR(qspi->fclk);
0849 dev_err(&pdev->dev, "could not get clk: %d\n", ret);
0850 }
0851
0852 pm_runtime_use_autosuspend(&pdev->dev);
0853 pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
0854 pm_runtime_enable(&pdev->dev);
0855
0856 if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
0857 master->max_speed_hz = max_freq;
0858
0859 dma_cap_zero(mask);
0860 dma_cap_set(DMA_MEMCPY, mask);
0861
0862 qspi->rx_chan = dma_request_chan_by_mask(&mask);
0863 if (IS_ERR(qspi->rx_chan)) {
0864 dev_err(qspi->dev,
0865 "No Rx DMA available, trying mmap mode\n");
0866 qspi->rx_chan = NULL;
0867 ret = 0;
0868 goto no_dma;
0869 }
0870 qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
0871 QSPI_DMA_BUFFER_SIZE,
0872 &qspi->rx_bb_dma_addr,
0873 GFP_KERNEL | GFP_DMA);
0874 if (!qspi->rx_bb_addr) {
0875 dev_err(qspi->dev,
0876 "dma_alloc_coherent failed, using PIO mode\n");
0877 dma_release_channel(qspi->rx_chan);
0878 goto no_dma;
0879 }
0880 master->dma_rx = qspi->rx_chan;
0881 init_completion(&qspi->transfer_complete);
0882 if (res_mmap)
0883 qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;
0884
0885 no_dma:
0886 if (!qspi->rx_chan && res_mmap) {
0887 qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
0888 if (IS_ERR(qspi->mmap_base)) {
0889 dev_info(&pdev->dev,
0890 "mmap failed with error %ld using PIO mode\n",
0891 PTR_ERR(qspi->mmap_base));
0892 qspi->mmap_base = NULL;
0893 master->mem_ops = NULL;
0894 }
0895 }
0896 qspi->mmap_enabled = false;
0897 qspi->current_cs = -1;
0898
0899 ret = devm_spi_register_master(&pdev->dev, master);
0900 if (!ret)
0901 return 0;
0902
0903 ti_qspi_dma_cleanup(qspi);
0904
0905 pm_runtime_disable(&pdev->dev);
0906 free_master:
0907 spi_master_put(master);
0908 return ret;
0909 }
0910
/*
 * ti_qspi_remove() - driver removal: quiesce the master, drop runtime
 * PM, and release DMA resources.
 */
static int ti_qspi_remove(struct platform_device *pdev)
{
	struct ti_qspi *qspi = platform_get_drvdata(pdev);
	int rc;

	/* Stop the message queue so no transfer races the teardown below. */
	rc = spi_master_suspend(qspi->master);
	if (rc)
		return rc;

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	ti_qspi_dma_cleanup(qspi);

	return 0;
}
0927
/* Only runtime resume is needed: register context is restored from cache. */
static const struct dev_pm_ops ti_qspi_pm_ops = {
	.runtime_resume = ti_qspi_runtime_resume,
};

static struct platform_driver ti_qspi_driver = {
	.probe = ti_qspi_probe,
	.remove = ti_qspi_remove,
	.driver = {
		.name = "ti-qspi",
		.pm = &ti_qspi_pm_ops,
		.of_match_table = ti_qspi_match,
	}
};

module_platform_driver(ti_qspi_driver);

MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
MODULE_ALIAS("platform:ti-qspi");