/* drivers/spi/spi-stm32-qspi.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

/* QUADSPI control register */
#define QSPI_CR			0x00
#define CR_EN			BIT(0)	/* enable the peripheral */
#define CR_ABORT		BIT(1)	/* abort ongoing transfer, self-clearing */
#define CR_DMAEN		BIT(2)	/* DMA request enable */
#define CR_TCEN			BIT(3)
#define CR_SSHIFT		BIT(4)	/* sample shift (half clock cycle) */
#define CR_DFM			BIT(6)
#define CR_FSEL			BIT(7)	/* flash bank select: 0 = bk1, 1 = bk2 */
#define CR_FTHRES_SHIFT		8	/* FIFO threshold level */
#define CR_TEIE			BIT(16)	/* transfer error irq enable */
#define CR_TCIE			BIT(17)	/* transfer complete irq enable */
#define CR_FTIE			BIT(18)
#define CR_SMIE			BIT(19)	/* status-match irq enable (auto polling) */
#define CR_TOIE			BIT(20)
#define CR_APMS			BIT(22)	/* stop auto polling on match */
#define CR_PRESC_MASK		GENMASK(31, 24)	/* clock prescaler */

/* QUADSPI device configuration register */
#define QSPI_DCR		0x04
#define DCR_FSIZE_MASK		GENMASK(20, 16)	/* flash memory size (2^(FSIZE+1)) */

/* QUADSPI status register */
#define QSPI_SR			0x08
#define SR_TEF			BIT(0)	/* transfer error */
#define SR_TCF			BIT(1)	/* transfer complete */
#define SR_FTF			BIT(2)	/* FIFO threshold reached */
#define SR_SMF			BIT(3)	/* status match (auto polling) */
#define SR_TOF			BIT(4)
#define SR_BUSY			BIT(5)	/* operation in progress */
#define SR_FLEVEL_MASK		GENMASK(13, 8)

/* QUADSPI flag clear register (write 1 to clear the matching SR flag) */
#define QSPI_FCR		0x0c
#define FCR_CTEF		BIT(0)
#define FCR_CTCF		BIT(1)
#define FCR_CSMF		BIT(3)

/* QUADSPI data length register (number of bytes - 1) */
#define QSPI_DLR		0x10

/* QUADSPI communication configuration register */
#define QSPI_CCR		0x14
#define CCR_INST_MASK		GENMASK(7, 0)	/* command opcode */
#define CCR_IMODE_MASK		GENMASK(9, 8)	/* instruction phase buswidth */
#define CCR_ADMODE_MASK		GENMASK(11, 10)	/* address phase buswidth */
#define CCR_ADSIZE_MASK		GENMASK(13, 12)	/* address size (bytes - 1) */
#define CCR_DCYC_MASK		GENMASK(22, 18)	/* number of dummy cycles */
#define CCR_DMODE_MASK		GENMASK(25, 24)	/* data phase buswidth */
#define CCR_FMODE_MASK		GENMASK(27, 26)	/* functional mode, see below */
#define CCR_FMODE_INDW		(0U << 26)	/* indirect write */
#define CCR_FMODE_INDR		(1U << 26)	/* indirect read */
#define CCR_FMODE_APM		(2U << 26)	/* automatic polling */
#define CCR_FMODE_MM		(3U << 26)	/* memory mapped */
#define CCR_BUSWIDTH_0		0x0
#define CCR_BUSWIDTH_1		0x1
#define CCR_BUSWIDTH_2		0x2
#define CCR_BUSWIDTH_4		0x3

#define QSPI_AR			0x18	/* address register */
#define QSPI_ABR		0x1c	/* alternate bytes register */
#define QSPI_DR			0x20	/* data register (FIFO access) */
#define QSPI_PSMKR		0x24	/* polling status mask */
#define QSPI_PSMAR		0x28	/* polling status match */
#define QSPI_PIR		0x2c	/* polling interval */
#define QSPI_LPTR		0x30	/* low-power timeout */

#define STM32_QSPI_MAX_MMAP_SZ	SZ_256M	/* max memory-mapped window */
#define STM32_QSPI_MAX_NORCHIP	2	/* two flash banks (bk1/bk2) */

#define STM32_FIFO_TIMEOUT_US 30000
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
#define STM32_COMP_TIMEOUT_MS 1000
#define STM32_AUTOSUSPEND_DELAY -1
0094 
/* per-chip-select flash configuration, filled in stm32_qspi_setup() */
struct stm32_qspi_flash {
	u32 cs;		/* chip select number: 0 (bk1) or 1 (bk2) */
	u32 presc;	/* clock prescaler derived from spi->max_speed_hz */
};
0099 
/* driver instance state, one per QSPI controller */
struct stm32_qspi {
	struct device *dev;
	struct spi_controller *ctrl;
	phys_addr_t phys_base;		/* physical base, used for DMA slave addr */
	void __iomem *io_base;		/* register window */
	void __iomem *mm_base;		/* memory-mapped flash window */
	resource_size_t mm_size;	/* size of the memory-mapped window */
	struct clk *clk;
	u32 clk_rate;			/* cached kernel clock rate (Hz) */
	struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
	struct completion data_completion;	/* signalled on TCF/TEF irq */
	struct completion match_completion;	/* signalled on SMF irq */
	u32 fmode;			/* current CCR functional mode */

	struct dma_chan *dma_chtx;	/* optional: NULL when not available */
	struct dma_chan *dma_chrx;	/* optional: NULL when not available */
	struct completion dma_completion;

	u32 cr_reg;			/* CR shadow, restored on system resume */
	u32 dcr_reg;			/* DCR shadow, restored on system resume */
	unsigned long status_timeout;	/* ms timeout for automatic-polling mode */

	/*
	 * to protect device configuration, could be different between
	 * 2 flash access (bk1, bk2)
	 */
	struct mutex lock;
};
0128 
/*
 * Interrupt handler: acknowledges a status-match event (automatic-polling
 * mode) or a transfer-complete/transfer-error event (indirect mode) by
 * masking the corresponding irq source and completing the matching waiter.
 */
static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
{
	struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
	u32 cr, sr;

	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	sr = readl_relaxed(qspi->io_base + QSPI_SR);

	/* status match has priority: only handled when its irq is enabled */
	if (cr & CR_SMIE && sr & SR_SMF) {
		/* disable irq */
		cr &= ~CR_SMIE;
		writel_relaxed(cr, qspi->io_base + QSPI_CR);
		complete(&qspi->match_completion);

		return IRQ_HANDLED;
	}

	if (sr & (SR_TEF | SR_TCF)) {
		/* disable irq */
		cr &= ~CR_TCIE & ~CR_TEIE;
		writel_relaxed(cr, qspi->io_base + QSPI_CR);
		complete(&qspi->data_completion);
	}

	return IRQ_HANDLED;
}
0155 
/* pop one byte from the QSPI data FIFO into *val */
static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
	*val = readb_relaxed(addr);
}
0160 
/* push one byte from *val into the QSPI data FIFO */
static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
	writeb_relaxed(*val, addr);
}
0165 
0166 static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
0167                   const struct spi_mem_op *op)
0168 {
0169     void (*tx_fifo)(u8 *val, void __iomem *addr);
0170     u32 len = op->data.nbytes, sr;
0171     u8 *buf;
0172     int ret;
0173 
0174     if (op->data.dir == SPI_MEM_DATA_IN) {
0175         tx_fifo = stm32_qspi_read_fifo;
0176         buf = op->data.buf.in;
0177 
0178     } else {
0179         tx_fifo = stm32_qspi_write_fifo;
0180         buf = (u8 *)op->data.buf.out;
0181     }
0182 
0183     while (len--) {
0184         ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
0185                             sr, (sr & SR_FTF), 1,
0186                             STM32_FIFO_TIMEOUT_US);
0187         if (ret) {
0188             dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
0189                 len, sr);
0190             return ret;
0191         }
0192         tx_fifo(buf++, qspi->io_base + QSPI_DR);
0193     }
0194 
0195     return 0;
0196 }
0197 
/*
 * Memory-mapped read: the flash content is directly visible in the
 * mm_base window, so a simple copy from io memory is enough.
 */
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
			    const struct spi_mem_op *op)
{
	memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
		      op->data.nbytes);
	return 0;
}
0205 
/* dmaengine completion callback: wake the waiter in stm32_qspi_tx_dma() */
static void stm32_qspi_dma_callback(void *arg)
{
	complete((struct completion *)arg);
}
0212 
/*
 * Transfer op->data via DMA. Maps the spi-mem buffer, submits a slave
 * scatter-gather transfer, enables the controller's DMA requests and
 * waits for completion (timeout scaled by the number of sg entries).
 *
 * Returns 0 on success, -EINVAL if the buffer is not DMA-able, -ENOMEM
 * if descriptor preparation fails, or -ETIMEDOUT / a submit error code.
 */
static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
			     const struct spi_mem_op *op)
{
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	struct dma_chan *dma_ch;
	struct sg_table sgt;
	dma_cookie_t cookie;
	u32 cr, t_out;
	int err;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_dir = DMA_DEV_TO_MEM;
		dma_ch = qspi->dma_chrx;
	} else {
		dma_dir = DMA_MEM_TO_DEV;
		dma_ch = qspi->dma_chtx;
	}

	/*
	 * spi_map_buf return -EINVAL if the buffer is not DMA-able
	 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
	 */
	err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
				       dma_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		err = -ENOMEM;
		goto out_unmap;
	}

	cr = readl_relaxed(qspi->io_base + QSPI_CR);

	reinit_completion(&qspi->dma_completion);
	desc->callback = stm32_qspi_dma_callback;
	desc->callback_param = &qspi->dma_completion;
	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err)
		goto out;

	dma_async_issue_pending(dma_ch);

	/* enable the controller's DMA requests only once the desc is queued */
	writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);

	/* scale the timeout with the transfer size (one unit per sg entry) */
	t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
	if (!wait_for_completion_timeout(&qspi->dma_completion,
					 msecs_to_jiffies(t_out)))
		err = -ETIMEDOUT;

	if (err)
		dmaengine_terminate_all(dma_ch);

out:
	/* always leave the controller with DMA requests disabled */
	writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
out_unmap:
	spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);

	return err;
}
0276 
/*
 * Dispatch the data phase of an operation: memory-mapped copy when in
 * MM mode, DMA when a suitable channel exists and the transfer is large
 * enough (> 4 bytes) to be worth it, CPU polling otherwise (also the
 * fallback when the DMA attempt fails).
 */
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
	if (!op->data.nbytes)
		return 0;

	if (qspi->fmode == CCR_FMODE_MM)
		return stm32_qspi_tx_mm(qspi, op);
	else if (((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
		 (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx)) &&
		  op->data.nbytes > 4)
		if (!stm32_qspi_tx_dma(qspi, op))
			return 0;

	return stm32_qspi_tx_poll(qspi, op);
}
0292 
/* poll SR until the BUSY bit clears; -ETIMEDOUT after STM32_BUSY_TIMEOUT_US */
static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
	u32 sr;

	return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
						 !(sr & SR_BUSY), 1,
						 STM32_BUSY_TIMEOUT_US);
}
0301 
/*
 * Wait for the end of an indirect-mode command: arm the TCF/TEF irqs and
 * sleep on data_completion, then always clear the flags and wait for the
 * BUSY bit to drop. Returns 0, -ETIMEDOUT or -EIO (transfer error flag).
 */
static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
{
	u32 cr, sr;
	int err = 0;

	/*
	 * Nothing to arm when the transfer already completed (TCF set) or
	 * in automatic-polling mode, where completion is signalled by the
	 * status-match irq instead.
	 */
	if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
	    qspi->fmode == CCR_FMODE_APM)
		goto out;

	reinit_completion(&qspi->data_completion);
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);

	if (!wait_for_completion_timeout(&qspi->data_completion,
				msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
		err = -ETIMEDOUT;
	} else {
		sr = readl_relaxed(qspi->io_base + QSPI_SR);
		if (sr & SR_TEF)
			err = -EIO;
	}

out:
	/* clear flags */
	writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
	if (!err)
		err = stm32_qspi_wait_nobusy(qspi);

	return err;
}
0332 
/*
 * Wait for an automatic-polling status match: arm the SMF irq and sleep
 * on match_completion for the caller-provided qspi->status_timeout (ms).
 * On match, clear the status-match flag. Returns 0 or -ETIMEDOUT.
 */
static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
{
	u32 cr;

	reinit_completion(&qspi->match_completion);
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);

	if (!wait_for_completion_timeout(&qspi->match_completion,
				msecs_to_jiffies(qspi->status_timeout)))
		return -ETIMEDOUT;

	writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);

	return 0;
}
0349 
0350 static int stm32_qspi_get_mode(u8 buswidth)
0351 {
0352     if (buswidth == 4)
0353         return CCR_BUSWIDTH_4;
0354 
0355     return buswidth;
0356 }
0357 
/*
 * Program and execute one spi-mem operation using the functional mode
 * previously selected in qspi->fmode. Configures prescaler/bank select,
 * data length, the CCR command phases and (for non-memory-mapped modes)
 * the address register, then runs the data phase and waits for command
 * completion. Any error - and every memory-mapped read - ends with an
 * abort sequence to stop the controller's prefetching.
 *
 * Caller must hold qspi->lock. Returns 0 or a negative error code.
 */
static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
	u32 ccr, cr;
	int timeout, err = 0, err_poll_status = 0;

	dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.val, op->data.nbytes);

	/* apply the per-flash prescaler and select the right bank */
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	cr &= ~CR_PRESC_MASK & ~CR_FSEL;
	cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
	cr |= FIELD_PREP(CR_FSEL, flash->cs);
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* DLR holds the number of bytes minus one */
	if (op->data.nbytes)
		writel_relaxed(op->data.nbytes - 1,
			       qspi->io_base + QSPI_DLR);

	ccr = qspi->fmode;
	ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
	ccr |= FIELD_PREP(CCR_IMODE_MASK,
			  stm32_qspi_get_mode(op->cmd.buswidth));

	if (op->addr.nbytes) {
		ccr |= FIELD_PREP(CCR_ADMODE_MASK,
				  stm32_qspi_get_mode(op->addr.buswidth));
		ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
	}

	/* convert dummy bytes to clock cycles for the configured buswidth */
	if (op->dummy.nbytes)
		ccr |= FIELD_PREP(CCR_DCYC_MASK,
				  op->dummy.nbytes * 8 / op->dummy.buswidth);

	if (op->data.nbytes) {
		ccr |= FIELD_PREP(CCR_DMODE_MASK,
				  stm32_qspi_get_mode(op->data.buswidth));
	}

	/* writing CCR starts the command in indirect/automatic-polling modes */
	writel_relaxed(ccr, qspi->io_base + QSPI_CCR);

	if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
		writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);

	if (qspi->fmode == CCR_FMODE_APM)
		err_poll_status = stm32_qspi_wait_poll_status(qspi);

	err = stm32_qspi_tx(qspi, op);

	/*
	 * Abort in:
	 * -error case
	 * -read memory map: prefetching must be stopped if we read the last
	 *  byte of device (device size - fifo size). like device size is not
	 *  knows, the prefetching is always stop.
	 */
	if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
		goto abort;

	/* wait end of tx in indirect mode */
	err = stm32_qspi_wait_cmd(qspi);
	if (err)
		goto abort;

	return 0;

abort:
	cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* wait clear of abort bit by hw */
	timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
						    cr, !(cr & CR_ABORT), 1,
						    STM32_ABT_TIMEOUT_US);

	writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);

	/* a deliberate MM-mode abort with no error is not worth a message */
	if (err || err_poll_status || timeout)
		dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
			__func__, err, err_poll_status, timeout);

	return err;
}
0444 
/*
 * spi-mem poll_status callback: use the controller's automatic-polling
 * mode to wait in hardware until (status & mask) == match, instead of
 * repeatedly issuing the status-read op from software.
 *
 * NOTE(review): initial_delay_us/polling_rate_us are not programmed into
 * the hardware here; only timeout_ms is used - confirm this is intended.
 */
static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
				  u16 mask, u16 match,
				  unsigned long initial_delay_us,
				  unsigned long polling_rate_us,
				  unsigned long timeout_ms)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if (!spi_mem_supports_op(mem, op))
		return -EOPNOTSUPP;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);

	/* program the hardware comparison: (status & mask) == match */
	writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
	writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
	qspi->fmode = CCR_FMODE_APM;
	qspi->status_timeout = timeout_ms;

	ret = stm32_qspi_send(mem, op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret;
}
0476 
/*
 * spi-mem exec_op callback: run one operation in indirect mode, choosing
 * read or write functional mode from the data phase direction.
 */
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	int ret;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);
	/* a data-less op is issued as an indirect write */
	if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
		qspi->fmode = CCR_FMODE_INDR;
	else
		qspi->fmode = CCR_FMODE_INDW;

	ret = stm32_qspi_send(mem, op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret;
}
0500 
0501 static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
0502 {
0503     struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
0504 
0505     if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
0506         return -EOPNOTSUPP;
0507 
0508     /* should never happen, as mm_base == null is an error probe exit condition */
0509     if (!qspi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
0510         return -EOPNOTSUPP;
0511 
0512     if (!qspi->mm_size)
0513         return -EOPNOTSUPP;
0514 
0515     return 0;
0516 }
0517 
/*
 * spi-mem dirmap_read callback: build a concrete spi_mem_op from the
 * dirmap template plus (offs, len, buf), then read either through the
 * memory-mapped window (when the range fits) or in indirect-read mode.
 *
 * Returns the number of bytes read (always len on success) or a
 * negative error code.
 */
static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
	struct spi_mem_op op;
	u32 addr_max;
	int ret;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);
	/* make a local copy of desc op_tmpl and complete dirmap rdesc
	 * spi_mem_op template with offs, len and *buf in  order to get
	 * all needed transfer information into struct spi_mem_op
	 */
	memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
	dev_dbg(qspi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);

	op.data.nbytes = len;
	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;

	/* fall back to indirect read when the range exceeds the mm window */
	addr_max = op.addr.val + op.data.nbytes + 1;
	if (addr_max < qspi->mm_size && op.addr.buswidth)
		qspi->fmode = CCR_FMODE_MM;
	else
		qspi->fmode = CCR_FMODE_INDR;

	ret = stm32_qspi_send(desc->mem, &op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret ?: len;
}
0556 
/*
 * spi_controller setup callback: compute the per-device clock prescaler,
 * record the chip select, and program the baseline CR/DCR configuration
 * (shadowed for restore on resume).
 */
static int stm32_qspi_setup(struct spi_device *spi)
{
	struct spi_controller *ctrl = spi->master;
	struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct stm32_qspi_flash *flash;
	u32 presc;
	int ret;

	if (ctrl->busy)
		return -EBUSY;

	if (!spi->max_speed_hz)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	/* qspi clock = kernel clock / (presc + 1), rounded to not exceed max */
	presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;

	flash = &qspi->flash[spi->chip_select];
	flash->cs = spi->chip_select;
	flash->presc = presc;

	mutex_lock(&qspi->lock);
	/* auto-poll stop on match, FIFO threshold 4, sample shift, enable */
	qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);

	/* set dcr fsize to max address */
	qspi->dcr_reg = DCR_FSIZE_MASK;
	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return 0;
}
0595 
/*
 * Request the optional rx/tx DMA channels and configure them for
 * byte-wide access to the QSPI data register. DMA is best-effort: any
 * failure other than -EPROBE_DEFER leaves the affected channel NULL and
 * the driver falls back to CPU polling.
 *
 * Returns 0, or -EPROBE_DEFER when the rx channel is not ready yet.
 */
static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
	struct dma_slave_config dma_cfg;
	struct device *dev = qspi->dev;
	int ret = 0;

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
	dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
	dma_cfg.src_maxburst = 4;
	dma_cfg.dst_maxburst = 4;

	qspi->dma_chrx = dma_request_chan(dev, "rx");
	if (IS_ERR(qspi->dma_chrx)) {
		ret = PTR_ERR(qspi->dma_chrx);
		qspi->dma_chrx = NULL;
		if (ret == -EPROBE_DEFER)
			goto out;
	} else {
		if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
			dev_err(dev, "dma rx config failed\n");
			dma_release_channel(qspi->dma_chrx);
			qspi->dma_chrx = NULL;
		}
	}

	qspi->dma_chtx = dma_request_chan(dev, "tx");
	if (IS_ERR(qspi->dma_chtx)) {
		ret = PTR_ERR(qspi->dma_chtx);
		qspi->dma_chtx = NULL;
	} else {
		if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
			dev_err(dev, "dma tx config failed\n");
			dma_release_channel(qspi->dma_chtx);
			qspi->dma_chtx = NULL;
		}
	}

out:
	init_completion(&qspi->dma_completion);

	/* only probe deferral is propagated; missing DMA is tolerated */
	if (ret != -EPROBE_DEFER)
		ret = 0;

	return ret;
}
0645 
0646 static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
0647 {
0648     if (qspi->dma_chtx)
0649         dma_release_channel(qspi->dma_chtx);
0650     if (qspi->dma_chrx)
0651         dma_release_channel(qspi->dma_chrx);
0652 }
0653 
/*
 * no special host constraint, so use default spi_mem_default_supports_op
 * to check supported mode.
 */
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
	.exec_op	= stm32_qspi_exec_op,
	.dirmap_create	= stm32_qspi_dirmap_create,
	.dirmap_read	= stm32_qspi_dirmap_read,
	.poll_status	= stm32_qspi_poll_status,
};
0664 
/*
 * Probe: map the "qspi" register window and the "qspi_mm" memory-mapped
 * window, request the irq and clock, optionally reset the block, set up
 * DMA, enable runtime PM (active, autosuspend) and register the
 * spi-mem controller.
 */
static int stm32_qspi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *ctrl;
	struct reset_control *rstc;
	struct stm32_qspi *qspi;
	struct resource *res;
	int ret, irq;

	ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
	if (!ctrl)
		return -ENOMEM;

	qspi = spi_controller_get_devdata(ctrl);
	qspi->ctrl = ctrl;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
	qspi->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->io_base))
		return PTR_ERR(qspi->io_base);

	/* physical address is needed for the DMA slave configuration */
	qspi->phys_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
	qspi->mm_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->mm_base))
		return PTR_ERR(qspi->mm_base);

	qspi->mm_size = resource_size(res);
	if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
			       dev_name(dev), qspi);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	init_completion(&qspi->data_completion);
	init_completion(&qspi->match_completion);

	qspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(qspi->clk))
		return PTR_ERR(qspi->clk);

	qspi->clk_rate = clk_get_rate(qspi->clk);
	if (!qspi->clk_rate)
		return -EINVAL;

	ret = clk_prepare_enable(qspi->clk);
	if (ret) {
		dev_err(dev, "can not enable the clock\n");
		return ret;
	}

	/* reset is optional: only deferral is fatal, otherwise pulse it */
	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		if (ret == -EPROBE_DEFER)
			goto err_clk_disable;
	} else {
		reset_control_assert(rstc);
		udelay(2);
		reset_control_deassert(rstc);
	}

	qspi->dev = dev;
	platform_set_drvdata(pdev, qspi);
	ret = stm32_qspi_dma_setup(qspi);
	if (ret)
		goto err_dma_free;

	mutex_init(&qspi->lock);

	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
		| SPI_TX_DUAL | SPI_TX_QUAD;
	ctrl->setup = stm32_qspi_setup;
	ctrl->bus_num = -1;
	ctrl->mem_ops = &stm32_qspi_mem_ops;
	ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
	ctrl->dev.of_node = dev->of_node;

	pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	/* keep the device up until registration succeeded */
	pm_runtime_get_noresume(dev);

	ret = spi_register_master(ctrl);
	if (ret)
		goto err_pm_runtime_free;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm_runtime_free:
	pm_runtime_get_sync(qspi->dev);
	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	mutex_destroy(&qspi->lock);
	pm_runtime_put_noidle(qspi->dev);
	pm_runtime_disable(qspi->dev);
	pm_runtime_set_suspended(qspi->dev);
	pm_runtime_dont_use_autosuspend(qspi->dev);
err_dma_free:
	stm32_qspi_dma_free(qspi);
err_clk_disable:
	clk_disable_unprepare(qspi->clk);

	return ret;
}
0783 
/*
 * Remove: unregister the controller, quiesce the peripheral and tear
 * down DMA, runtime PM and the clock - mirroring probe's error path.
 */
static int stm32_qspi_remove(struct platform_device *pdev)
{
	struct stm32_qspi *qspi = platform_get_drvdata(pdev);

	/* make sure the device is powered while we touch registers */
	pm_runtime_get_sync(qspi->dev);
	spi_unregister_master(qspi->ctrl);
	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	stm32_qspi_dma_free(qspi);
	mutex_destroy(&qspi->lock);
	pm_runtime_put_noidle(qspi->dev);
	pm_runtime_disable(qspi->dev);
	pm_runtime_set_suspended(qspi->dev);
	pm_runtime_dont_use_autosuspend(qspi->dev);
	clk_disable_unprepare(qspi->clk);

	return 0;
}
0802 
/* runtime PM suspend: gate the kernel clock */
static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	clk_disable_unprepare(qspi->clk);

	return 0;
}
0811 
/* runtime PM resume: re-enable the kernel clock */
static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	return clk_prepare_enable(qspi->clk);
}
0818 
/* system suspend: park the pins, then force runtime suspend */
static int __maybe_unused stm32_qspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);

	return pm_runtime_force_suspend(dev);
}
0825 
/*
 * System resume: restore pin state and re-program the CR/DCR shadows
 * saved in stm32_qspi_setup(), since register contents are lost.
 */
static int __maybe_unused stm32_qspi_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(dev);

	/* take a runtime reference so the clock is on for register writes */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
0849 
/* runtime + system sleep PM callbacks */
static const struct dev_pm_ops stm32_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
			   stm32_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
};
0855 
/* devicetree match table */
static const struct of_device_id stm32_qspi_match[] = {
	{.compatible = "st,stm32f469-qspi"},
	{}
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);
0861 
static struct platform_driver stm32_qspi_driver = {
	.probe	= stm32_qspi_probe,
	.remove	= stm32_qspi_remove,
	.driver	= {
		.name = "stm32-qspi",
		.of_match_table = stm32_qspi_match,
		.pm = &stm32_qspi_pm_ops,
	},
};
module_platform_driver(stm32_qspi_driver);

MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
MODULE_LICENSE("GPL v2");