0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021 #include <linux/kernel.h>
0022 #include <linux/ioport.h>
0023 #include <linux/of.h>
0024 #include <linux/of_device.h>
0025 #include <linux/platform_device.h>
0026 #include <linux/delay.h>
0027 #include <linux/interrupt.h>
0028 #include <linux/dma-mapping.h>
0029 #include <linux/dmaengine.h>
0030 #include <linux/highmem.h>
0031 #include <linux/clk.h>
0032 #include <linux/err.h>
0033 #include <linux/completion.h>
0034 #include <linux/pinctrl/consumer.h>
0035 #include <linux/regulator/consumer.h>
0036 #include <linux/pm_runtime.h>
0037 #include <linux/module.h>
0038 #include <linux/stmp_device.h>
0039 #include <linux/spi/spi.h>
0040 #include <linux/spi/mxs-spi.h>
0041 #include <trace/events/spi.h>
0042
#define DRIVER_NAME "mxs-spi"

/* Timeout (in milliseconds) for both PIO register polling and DMA completion. */
#define SSP_TIMEOUT 10000

/* Maximum number of bytes covered by a single scatterlist entry. */
#define SG_MAXLEN 0xff00

/*
 * Flags for the txrx functions.
 */
#define TXRX_WRITE (1<<0)	/* TX direction (otherwise RX) */
#define TXRX_DEASSERT_CS (1<<1)	/* de-assert chip select after the transfer */
0056
/* Per-controller driver state. */
struct mxs_spi {
	struct mxs_ssp ssp;	/* shared SSP block state (base, clk, dmach, devid) */
	struct completion c;	/* completed by the DMA done callback */
	unsigned int sck;	/* last requested SCK rate in Hz, used to skip reprogramming */
};
0062
/*
 * Program the SSP block for one transfer: SCK rate, SPI mode
 * (CPOL/CPHA), 8-bit word length, and CS locking.
 *
 * Returns 0 on success, -EINVAL if the effective clock rate is zero.
 */
static int mxs_spi_setup_transfer(struct spi_device *dev,
				  const struct spi_transfer *t)
{
	struct mxs_spi *spi = spi_master_get_devdata(dev->master);
	struct mxs_ssp *ssp = &spi->ssp;
	/* Per-transfer speed is capped by the device's maximum. */
	const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);

	if (hz == 0) {
		dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
		return -EINVAL;
	}

	if (hz != spi->sck) {
		mxs_ssp_set_clk_rate(ssp, hz);
		/*
		 * Cache the requested rate so consecutive transfers at the
		 * same speed skip the clock reprogramming above.
		 * NOTE(review): the rate actually achieved by
		 * mxs_ssp_set_clk_rate() may differ from hz -- the cache
		 * deliberately stores the *requested* value.
		 */
		spi->sck = hz;
	}

	/* Keep CS asserted across words; TXRX_DEASSERT_CS drops it later. */
	writel(BM_SSP_CTRL0_LOCK_CS,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

	writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
	       BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
	       ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
	       ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
	       ssp->base + HW_SSP_CTRL1(ssp));

	/* Clear the (SD/MMC-oriented) command registers; unused for SPI. */
	writel(0x0, ssp->base + HW_SSP_CMD0);
	writel(0x0, ssp->base + HW_SSP_CMD1);

	return 0;
}
0103
0104 static u32 mxs_spi_cs_to_reg(unsigned cs)
0105 {
0106 u32 select = 0;
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116 if (cs & 1)
0117 select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
0118 if (cs & 2)
0119 select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;
0120
0121 return select;
0122 }
0123
0124 static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
0125 {
0126 const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
0127 struct mxs_ssp *ssp = &spi->ssp;
0128 u32 reg;
0129
0130 do {
0131 reg = readl_relaxed(ssp->base + offset);
0132
0133 if (!set)
0134 reg = ~reg;
0135
0136 reg &= mask;
0137
0138 if (reg == mask)
0139 return 0;
0140 } while (time_before(jiffies, timeout));
0141
0142 return -ETIMEDOUT;
0143 }
0144
/* DMA completion callback: wake the waiter in mxs_spi_txrx_dma(). */
static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;

	complete(&spi->c);
}
0151
/*
 * SSP error IRQ handler.  Normal completion is signalled via DMA, so
 * any interrupt arriving here is unexpected; dump CTRL1/STATUS for
 * diagnosis and acknowledge it.
 */
static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;

	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));
	return IRQ_HANDLED;
}
0162
0163 static int mxs_spi_txrx_dma(struct mxs_spi *spi,
0164 unsigned char *buf, int len,
0165 unsigned int flags)
0166 {
0167 struct mxs_ssp *ssp = &spi->ssp;
0168 struct dma_async_tx_descriptor *desc = NULL;
0169 const bool vmalloced_buf = is_vmalloc_addr(buf);
0170 const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
0171 const int sgs = DIV_ROUND_UP(len, desc_len);
0172 int sg_count;
0173 int min, ret;
0174 u32 ctrl0;
0175 struct page *vm_page;
0176 struct {
0177 u32 pio[4];
0178 struct scatterlist sg;
0179 } *dma_xfer;
0180
0181 if (!len)
0182 return -EINVAL;
0183
0184 dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
0185 if (!dma_xfer)
0186 return -ENOMEM;
0187
0188 reinit_completion(&spi->c);
0189
0190
0191 ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
0192 ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
0193 BM_SSP_CTRL0_READ);
0194 ctrl0 |= BM_SSP_CTRL0_DATA_XFER;
0195
0196 if (!(flags & TXRX_WRITE))
0197 ctrl0 |= BM_SSP_CTRL0_READ;
0198
0199
0200 for (sg_count = 0; sg_count < sgs; sg_count++) {
0201
0202 min = min(len, desc_len);
0203
0204
0205
0206
0207
0208 if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
0209 ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
0210
0211 if (ssp->devid == IMX23_SSP) {
0212 ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
0213 ctrl0 |= min;
0214 }
0215
0216 dma_xfer[sg_count].pio[0] = ctrl0;
0217 dma_xfer[sg_count].pio[3] = min;
0218
0219 if (vmalloced_buf) {
0220 vm_page = vmalloc_to_page(buf);
0221 if (!vm_page) {
0222 ret = -ENOMEM;
0223 goto err_vmalloc;
0224 }
0225
0226 sg_init_table(&dma_xfer[sg_count].sg, 1);
0227 sg_set_page(&dma_xfer[sg_count].sg, vm_page,
0228 min, offset_in_page(buf));
0229 } else {
0230 sg_init_one(&dma_xfer[sg_count].sg, buf, min);
0231 }
0232
0233 ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
0234 (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
0235
0236 len -= min;
0237 buf += min;
0238
0239
0240 desc = dmaengine_prep_slave_sg(ssp->dmach,
0241 (struct scatterlist *)dma_xfer[sg_count].pio,
0242 (ssp->devid == IMX23_SSP) ? 1 : 4,
0243 DMA_TRANS_NONE,
0244 sg_count ? DMA_PREP_INTERRUPT : 0);
0245 if (!desc) {
0246 dev_err(ssp->dev,
0247 "Failed to get PIO reg. write descriptor.\n");
0248 ret = -EINVAL;
0249 goto err_mapped;
0250 }
0251
0252 desc = dmaengine_prep_slave_sg(ssp->dmach,
0253 &dma_xfer[sg_count].sg, 1,
0254 (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
0255 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
0256
0257 if (!desc) {
0258 dev_err(ssp->dev,
0259 "Failed to get DMA data write descriptor.\n");
0260 ret = -EINVAL;
0261 goto err_mapped;
0262 }
0263 }
0264
0265
0266
0267
0268
0269 desc->callback = mxs_ssp_dma_irq_callback;
0270 desc->callback_param = spi;
0271
0272
0273 dmaengine_submit(desc);
0274 dma_async_issue_pending(ssp->dmach);
0275
0276 if (!wait_for_completion_timeout(&spi->c,
0277 msecs_to_jiffies(SSP_TIMEOUT))) {
0278 dev_err(ssp->dev, "DMA transfer timeout\n");
0279 ret = -ETIMEDOUT;
0280 dmaengine_terminate_all(ssp->dmach);
0281 goto err_vmalloc;
0282 }
0283
0284 ret = 0;
0285
0286 err_vmalloc:
0287 while (--sg_count >= 0) {
0288 err_mapped:
0289 dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
0290 (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
0291 }
0292
0293 kfree(dma_xfer);
0294
0295 return ret;
0296 }
0297
0298 static int mxs_spi_txrx_pio(struct mxs_spi *spi,
0299 unsigned char *buf, int len,
0300 unsigned int flags)
0301 {
0302 struct mxs_ssp *ssp = &spi->ssp;
0303
0304 writel(BM_SSP_CTRL0_IGNORE_CRC,
0305 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
0306
0307 while (len--) {
0308 if (len == 0 && (flags & TXRX_DEASSERT_CS))
0309 writel(BM_SSP_CTRL0_IGNORE_CRC,
0310 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
0311
0312 if (ssp->devid == IMX23_SSP) {
0313 writel(BM_SSP_CTRL0_XFER_COUNT,
0314 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
0315 writel(1,
0316 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
0317 } else {
0318 writel(1, ssp->base + HW_SSP_XFER_SIZE);
0319 }
0320
0321 if (flags & TXRX_WRITE)
0322 writel(BM_SSP_CTRL0_READ,
0323 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
0324 else
0325 writel(BM_SSP_CTRL0_READ,
0326 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
0327
0328 writel(BM_SSP_CTRL0_RUN,
0329 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
0330
0331 if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
0332 return -ETIMEDOUT;
0333
0334 if (flags & TXRX_WRITE)
0335 writel(*buf, ssp->base + HW_SSP_DATA(ssp));
0336
0337 writel(BM_SSP_CTRL0_DATA_XFER,
0338 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
0339
0340 if (!(flags & TXRX_WRITE)) {
0341 if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
0342 BM_SSP_STATUS_FIFO_EMPTY, 0))
0343 return -ETIMEDOUT;
0344
0345 *buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
0346 }
0347
0348 if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
0349 return -ETIMEDOUT;
0350
0351 buf++;
0352 }
0353
0354 if (len <= 0)
0355 return 0;
0356
0357 return -ETIMEDOUT;
0358 }
0359
/*
 * spi_master transfer_one_message callback: program the chip select,
 * then execute every transfer in @m, using PIO for short transfers
 * and DMA for longer ones.  Returns 0 or a negative errno; the
 * per-message status is also stored in m->status.
 */
static int mxs_spi_transfer_one(struct spi_master *master,
				struct spi_message *m)
{
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	struct spi_transfer *t;
	unsigned int flag;
	int status = 0;

	/* Program the chip-select routing bits (see mxs_spi_cs_to_reg). */
	writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(mxs_spi_cs_to_reg(m->spi->chip_select),
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

	list_for_each_entry(t, &m->transfers, transfer_list) {

		trace_spi_transfer_start(m, t);

		status = mxs_spi_setup_transfer(m->spi, t);
		if (status)
			break;

		/*
		 * De-assert CS when this is the last transfer in the
		 * message XOR the transfer requests a cs_change (which
		 * inverts the default behaviour either way).
		 */
		flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
		       TXRX_DEASSERT_CS : 0;

		/*
		 * Transfers below 32 bytes go through PIO, anything
		 * larger through DMA; the DMA_ENABLE bit in CTRL1 must
		 * match the chosen path.
		 * NOTE(review): 32 is a tuning threshold -- presumably
		 * the break-even point versus DMA setup cost; confirm
		 * before changing.
		 */
		if (t->len < 32) {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_CLR);

			if (t->tx_buf)
				status = mxs_spi_txrx_pio(spi,
						(void *)t->tx_buf,
						t->len, flag | TXRX_WRITE);
			if (t->rx_buf)
				status = mxs_spi_txrx_pio(spi,
						t->rx_buf, t->len,
						flag);
		} else {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_SET);

			if (t->tx_buf)
				status = mxs_spi_txrx_dma(spi,
						(void *)t->tx_buf, t->len,
						flag | TXRX_WRITE);
			if (t->rx_buf)
				status = mxs_spi_txrx_dma(spi,
						t->rx_buf, t->len,
						flag);
		}

		trace_spi_transfer_stop(m, t);

		if (status) {
			/* Reset the block to a sane state after a failure. */
			stmp_reset_block(ssp->base);
			break;
		}

		m->actual_length += t->len;
	}

	m->status = status;
	spi_finalize_current_message(master);

	return status;
}
0439
0440 static int mxs_spi_runtime_suspend(struct device *dev)
0441 {
0442 struct spi_master *master = dev_get_drvdata(dev);
0443 struct mxs_spi *spi = spi_master_get_devdata(master);
0444 struct mxs_ssp *ssp = &spi->ssp;
0445 int ret;
0446
0447 clk_disable_unprepare(ssp->clk);
0448
0449 ret = pinctrl_pm_select_idle_state(dev);
0450 if (ret) {
0451 int ret2 = clk_prepare_enable(ssp->clk);
0452
0453 if (ret2)
0454 dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n",
0455 ret, ret2);
0456 }
0457
0458 return ret;
0459 }
0460
0461 static int mxs_spi_runtime_resume(struct device *dev)
0462 {
0463 struct spi_master *master = dev_get_drvdata(dev);
0464 struct mxs_spi *spi = spi_master_get_devdata(master);
0465 struct mxs_ssp *ssp = &spi->ssp;
0466 int ret;
0467
0468 ret = pinctrl_pm_select_default_state(dev);
0469 if (ret)
0470 return ret;
0471
0472 ret = clk_prepare_enable(ssp->clk);
0473 if (ret)
0474 pinctrl_pm_select_idle_state(dev);
0475
0476 return ret;
0477 }
0478
/*
 * System suspend: quiesce the SPI master, then power the hardware
 * down unless runtime PM has already done so.
 */
static int mxs_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret = spi_master_suspend(master);

	if (ret)
		return ret;

	if (pm_runtime_suspended(dev))
		return 0;

	return mxs_spi_runtime_suspend(dev);
}
0493
/*
 * System resume: power the hardware back up (unless runtime PM left
 * it suspended), then restart the SPI master.  If the master fails to
 * resume, undo the power-up so both states stay consistent.
 */
static int mxs_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret = 0;

	if (!pm_runtime_suspended(dev))
		ret = mxs_spi_runtime_resume(dev);
	if (ret)
		return ret;

	ret = spi_master_resume(master);
	if (ret < 0 && !pm_runtime_suspended(dev))
		mxs_spi_runtime_suspend(dev);

	return ret;
}
0512
/* Runtime PM gates the SSP clock/pins; system sleep reuses the same hooks. */
static const struct dev_pm_ops mxs_spi_pm = {
	SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
			   mxs_spi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
};
0518
/* DT match table; .data carries the mxs_ssp_id variant for probe(). */
static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
0525
/*
 * Probe: map the SSP block, acquire clock/IRQ/DMA resources, bring the
 * device up via runtime PM, reset the block and register the SPI
 * master.  Error paths unwind in strict reverse order of acquisition.
 */
static int mxs_spi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_spi_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;
	struct clk *clk;
	void __iomem *base;
	int devid, clk_freq;
	int ret = 0, irq_err;

	/*
	 * Default SSP clock rate (160 MHz) used when the device tree
	 * does not provide a "clock-frequency" property.
	 */
	const int clk_freq_default = 160000000;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* SSP variant (i.MX23 vs i.MX28) from the DT match data. */
	devid = (enum mxs_ssp_id) of_id->data;
	ret = of_property_read_u32(np, "clock-frequency",
				   &clk_freq);
	if (ret)
		clk_freq = clk_freq_default;

	master = spi_alloc_master(&pdev->dev, sizeof(*spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->transfer_one_message = mxs_spi_transfer_one;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->num_chipselect = 3;
	master->dev.of_node = np;
	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->auto_runtime_pm = true;

	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;
	ssp->dev = &pdev->dev;
	ssp->clk = clk;
	ssp->base = base;
	ssp->devid = devid;

	init_completion(&spi->c);

	/* Error IRQ only; data completion is signalled through DMA. */
	ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
			       dev_name(&pdev->dev), ssp);
	if (ret)
		goto out_master_free;

	ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(ssp->dmach)) {
		dev_err(ssp->dev, "Failed to request DMA\n");
		ret = PTR_ERR(ssp->dmach);
		goto out_dma_release;
	}

	pm_runtime_enable(ssp->dev);
	/* Without runtime PM support, power the device up manually. */
	if (!pm_runtime_enabled(ssp->dev)) {
		ret = mxs_spi_runtime_resume(ssp->dev);
		if (ret < 0) {
			dev_err(ssp->dev, "runtime resume failed\n");
			goto out_dma_release;
		}
	}

	ret = pm_runtime_resume_and_get(ssp->dev);
	if (ret < 0) {
		dev_err(ssp->dev, "runtime_get_sync failed\n");
		goto out_pm_runtime_disable;
	}

	clk_set_rate(ssp->clk, clk_freq);

	ret = stmp_reset_block(ssp->base);
	if (ret)
		goto out_pm_runtime_put;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
		goto out_pm_runtime_put;
	}

	/* Drop the probe-time reference; transfers re-acquire via auto RPM. */
	pm_runtime_put(ssp->dev);

	return 0;

out_pm_runtime_put:
	pm_runtime_put(ssp->dev);
out_pm_runtime_disable:
	pm_runtime_disable(ssp->dev);
out_dma_release:
	dma_release_channel(ssp->dmach);
out_master_free:
	spi_master_put(master);
	return ret;
}
0640
0641 static int mxs_spi_remove(struct platform_device *pdev)
0642 {
0643 struct spi_master *master;
0644 struct mxs_spi *spi;
0645 struct mxs_ssp *ssp;
0646
0647 master = platform_get_drvdata(pdev);
0648 spi = spi_master_get_devdata(master);
0649 ssp = &spi->ssp;
0650
0651 pm_runtime_disable(&pdev->dev);
0652 if (!pm_runtime_status_suspended(&pdev->dev))
0653 mxs_spi_runtime_suspend(&pdev->dev);
0654
0655 dma_release_channel(ssp->dmach);
0656
0657 return 0;
0658 }
0659
/* Platform driver glue: matched via DT (mxs_spi_dt_ids). */
static struct platform_driver mxs_spi_driver = {
	.probe	= mxs_spi_probe,
	.remove	= mxs_spi_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.of_match_table = mxs_spi_dt_ids,
		.pm = &mxs_spi_pm,
	},
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");