// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA     0x224
#define CPHA            BIT(0)

#define SE_SPI_LOOPBACK     0x22c
#define LOOPBACK_ENABLE     0x1
#define NORMAL_MODE     0x0
#define LOOPBACK_MSK        GENMASK(1, 0)

#define SE_SPI_CPOL     0x230
#define CPOL            BIT(2)

#define SE_SPI_DEMUX_OUTPUT_INV 0x24c
#define CS_DEMUX_OUTPUT_INV_MSK GENMASK(3, 0)

#define SE_SPI_DEMUX_SEL    0x250
#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)

#define SE_SPI_TRANS_CFG    0x25c
#define CS_TOGGLE       BIT(0)

#define SE_SPI_WORD_LEN     0x268
#define WORD_LEN_MSK        GENMASK(9, 0)
#define MIN_WORD_LEN        4

#define SE_SPI_TX_TRANS_LEN 0x26c
#define SE_SPI_RX_TRANS_LEN 0x270
#define TRANS_LEN_MSK       GENMASK(23, 0)

#define SE_SPI_PRE_POST_CMD_DLY 0x274

#define SE_SPI_DELAY_COUNTERS   0x278
#define SPI_INTER_WORDS_DELAY_MSK   GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK        GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT       10

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY     1
#define SPI_RX_ONLY     2
#define SPI_TX_RX       7
#define SPI_CS_ASSERT       8
#define SPI_CS_DEASSERT     9
#define SPI_SCK_ONLY        10
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY   BIT(0)
#define TIMESTAMP_BEFORE    BIT(1)
#define FRAGMENTATION       BIT(2)
#define TIMESTAMP_AFTER     BIT(3)
#define POST_CMD_DELAY      BIT(4)

#define GSI_LOOPBACK_EN     BIT(0)
#define GSI_CS_TOGGLE       BIT(3)
#define GSI_CPHA        BIT(4)
#define GSI_CPOL        BIT(5)

struct spi_geni_master {
    struct geni_se se;
    struct device *dev;
    u32 tx_fifo_depth;
    u32 fifo_width_bits;
    u32 tx_wm;
    u32 last_mode;
    unsigned long cur_speed_hz;
    unsigned long cur_sclk_hz;
    unsigned int cur_bits_per_word;
    unsigned int tx_rem_bytes;
    unsigned int rx_rem_bytes;
    const struct spi_transfer *cur_xfer;
    struct completion cs_done;
    struct completion cancel_done;
    struct completion abort_done;
    unsigned int oversampling;
    spinlock_t lock;
    int irq;
    bool cs_flag;
    bool abort_failed;
    struct dma_chan *tx;
    struct dma_chan *rx;
    int cur_xfer_mode;
};

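/*
 * Pick a source-clock table index and divider for the requested SPI speed.
 * The serial clock runs at speed_hz * oversampling, so, for example, a
 * 25 MHz request with oversampling 1 that matches a 100 MHz source clock
 * yields *clk_div = 4. Also votes for the matched rate via the OPP API.
 */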
static int get_spi_clk_cfg(unsigned int speed_hz,
            struct spi_geni_master *mas,
            unsigned int *clk_idx,
            unsigned int *clk_div)
{
    unsigned long sclk_freq;
    unsigned int actual_hz;
    int ret;

    ret = geni_se_clk_freq_match(&mas->se,
                speed_hz * mas->oversampling,
                clk_idx, &sclk_freq, false);
    if (ret) {
        dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
                            ret, speed_hz);
        return ret;
    }

    *clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
    actual_hz = sclk_freq / (mas->oversampling * *clk_div);

    dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
                actual_hz, sclk_freq, *clk_idx, *clk_div);
    ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
    if (ret)
        dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
    else
        mas->cur_sclk_hz = sclk_freq;

    return ret;
}

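/*
 * Recover from a stuck FIFO transfer: first try to cancel the current
 * m_cmd and, if the cancel doesn't complete within a second, escalate to
 * an abort. A failed abort is remembered in mas->abort_failed so that
 * spi_geni_is_abort_still_pending() can block further transfers.
 */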
static void handle_fifo_timeout(struct spi_master *spi,
                struct spi_message *msg)
{
    struct spi_geni_master *mas = spi_master_get_devdata(spi);
    unsigned long time_left;
    struct geni_se *se = &mas->se;

    spin_lock_irq(&mas->lock);
    reinit_completion(&mas->cancel_done);
    writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
    mas->cur_xfer = NULL;
    geni_se_cancel_m_cmd(se);
    spin_unlock_irq(&mas->lock);

    time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
    if (time_left)
        return;

    spin_lock_irq(&mas->lock);
    reinit_completion(&mas->abort_done);
    geni_se_abort_m_cmd(se);
    spin_unlock_irq(&mas->lock);

    time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
    if (!time_left) {
        dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");

        /*
         * No need for a lock since SPI core has a lock and we never
         * access this from an interrupt.
         */
        mas->abort_failed = true;
    }
}

static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
{
    struct spi_geni_master *mas = spi_master_get_devdata(spi);

    dmaengine_terminate_sync(mas->tx);
    dmaengine_terminate_sync(mas->rx);
}

static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
{
    struct spi_geni_master *mas = spi_master_get_devdata(spi);

    switch (mas->cur_xfer_mode) {
    case GENI_SE_FIFO:
        handle_fifo_timeout(spi, msg);
        break;
    case GENI_GPI_DMA:
        handle_gpi_timeout(spi, msg);
        break;
    default:
        dev_err(mas->dev, "Abort on Mode:%d not supported", mas->cur_xfer_mode);
    }
}

static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
{
    struct geni_se *se = &mas->se;
    u32 m_irq, m_irq_en;

    if (!mas->abort_failed)
        return false;

    /*
     * The only known case where a transfer times out and then a cancel
     * times out then an abort times out is if something is blocking our
     * interrupt handler from running.  Avoid starting any new transfers
     * until that sorts itself out.
     */
    spin_lock_irq(&mas->lock);
    m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
    m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
    spin_unlock_irq(&mas->lock);

    if (m_irq & m_irq_en) {
        dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
            m_irq & m_irq_en);
        return true;
    }

    /*
     * If we're here the problem resolved itself so no need to check more
     * on future transfers.
     */
    mas->abort_failed = false;

    return false;
}

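/*
 * Drive the chip select by issuing SPI_CS_ASSERT/SPI_CS_DEASSERT m_cmd
 * opcodes rather than toggling a GPIO. The set_flag from the SPI core is
 * inverted for active-low devices (no SPI_CS_HIGH), and the command is
 * synchronous: we wait up to a second for its CMD_DONE interrupt.
 */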
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
    struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
    struct spi_master *spi = dev_get_drvdata(mas->dev);
    struct geni_se *se = &mas->se;
    unsigned long time_left;

    if (!(slv->mode & SPI_CS_HIGH))
        set_flag = !set_flag;

    if (set_flag == mas->cs_flag)
        return;

    pm_runtime_get_sync(mas->dev);

    if (spi_geni_is_abort_still_pending(mas)) {
        dev_err(mas->dev, "Can't set chip select\n");
        goto exit;
    }

    spin_lock_irq(&mas->lock);
    if (mas->cur_xfer) {
        dev_err(mas->dev, "Can't set CS when prev xfer running\n");
        spin_unlock_irq(&mas->lock);
        goto exit;
    }

    mas->cs_flag = set_flag;
    reinit_completion(&mas->cs_done);
    if (set_flag)
        geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
    else
        geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
    spin_unlock_irq(&mas->lock);

    time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
    if (!time_left) {
        dev_warn(mas->dev, "Timeout setting chip select\n");
        handle_fifo_timeout(spi, NULL);
    }

exit:
    pm_runtime_put(mas->dev);
}

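/*
 * Program the SPI word length and FIFO packing. When SPI words pack
 * evenly into a FIFO word (e.g. 8 bits_per_word in a 32-bit FIFO) we pack
 * several per FIFO word, here 4; otherwise one SPI word per FIFO word.
 * The register holds bits_per_word - 4, since 4 is the minimum word length.
 */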
static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
                    unsigned int bits_per_word)
{
    unsigned int pack_words;
    bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
    struct geni_se *se = &mas->se;
    u32 word_len;

    /*
     * If bits_per_word doesn't divide the FIFO width evenly, SPI words
     * won't pack cleanly, so set the packing to 1 SPI word per FIFO word.
     */
    if (!(mas->fifo_width_bits % bits_per_word))
        pack_words = mas->fifo_width_bits / bits_per_word;
    else
        pack_words = 1;
    geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
                                true, true);
    word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
    writel(word_len, se->base + SE_SPI_WORD_LEN);
}

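/*
 * Program the serial clock for a new speed: pick the source-clock index
 * for SE_GENI_CLK_SEL and the divider for GENI_SER_M_CLK_CFG, then scale
 * the CPU-to-GENI interconnect bandwidth vote to match. A no-op when the
 * speed is unchanged from the previous transfer.
 */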
static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
                    unsigned long clk_hz)
{
    u32 clk_sel, m_clk_cfg, idx, div;
    struct geni_se *se = &mas->se;
    int ret;

    if (clk_hz == mas->cur_speed_hz)
        return 0;

    ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
    if (ret) {
        dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
        return ret;
    }

    /*
     * The SPI core clock gets configured with the requested frequency
     * or the closest achievable frequency. For that reason the requested
     * frequency is stored in cur_speed_hz and used for subsequent
     * transfers instead of calling the clk_get_rate() API.
     */
    mas->cur_speed_hz = clk_hz;

    clk_sel = idx & CLK_SEL_MSK;
    m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
    writel(clk_sel, se->base + SE_GENI_CLK_SEL);
    writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);

    /* Set BW quota for CPU as driver supports FIFO mode only. */
    se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
    ret = geni_icc_set_bw(se);
    if (ret)
        return ret;

    return 0;
}

static int setup_fifo_params(struct spi_device *spi_slv,
                    struct spi_master *spi)
{
    struct spi_geni_master *mas = spi_master_get_devdata(spi);
    struct geni_se *se = &mas->se;
    u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
    u32 demux_sel;

    if (mas->last_mode != spi_slv->mode) {
        if (spi_slv->mode & SPI_LOOP)
            loopback_cfg = LOOPBACK_ENABLE;

        if (spi_slv->mode & SPI_CPOL)
            cpol = CPOL;

        if (spi_slv->mode & SPI_CPHA)
            cpha = CPHA;

        if (spi_slv->mode & SPI_CS_HIGH)
            demux_output_inv = BIT(spi_slv->chip_select);

        demux_sel = spi_slv->chip_select;
        mas->cur_bits_per_word = spi_slv->bits_per_word;

        spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
        writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
        writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
        writel(cpha, se->base + SE_SPI_CPHA);
        writel(cpol, se->base + SE_SPI_CPOL);
        writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);

        mas->last_mode = spi_slv->mode;
    }

    return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
}

static void
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
    struct spi_master *spi = cb;

    spi->cur_msg->status = -EIO;
    if (result->result != DMA_TRANS_NOERROR) {
        dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
        spi_finalize_current_transfer(spi);
        return;
    }

    if (!result->residue) {
        spi->cur_msg->status = 0;
        dev_dbg(&spi->dev, "DMA txn completed\n");
    } else {
        dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
    }

    spi_finalize_current_transfer(spi);
}

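/*
 * Set up a transfer in GPI DMA mode. Per-transfer parameters (clock
 * config, word length, CS, duplex mode) travel to the GSI engine in a
 * struct gpi_spi_config hung off the dma_slave_config, and rx_len is
 * expressed in SPI words rather than bytes. Returns 1 so the SPI core
 * waits for the DMA completion callback to finalize the transfer.
 */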
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
              struct spi_device *spi_slv, struct spi_master *spi)
{
    unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
    struct dma_slave_config config = {};
    struct gpi_spi_config peripheral = {};
    struct dma_async_tx_descriptor *tx_desc, *rx_desc;
    int ret;

    config.peripheral_config = &peripheral;
    config.peripheral_size = sizeof(peripheral);
    peripheral.set_config = true;

    if (xfer->bits_per_word != mas->cur_bits_per_word ||
        xfer->speed_hz != mas->cur_speed_hz) {
        mas->cur_bits_per_word = xfer->bits_per_word;
        mas->cur_speed_hz = xfer->speed_hz;
    }

    if (xfer->tx_buf && xfer->rx_buf) {
        peripheral.cmd = SPI_DUPLEX;
    } else if (xfer->tx_buf) {
        peripheral.cmd = SPI_TX;
        peripheral.rx_len = 0;
    } else if (xfer->rx_buf) {
        peripheral.cmd = SPI_RX;
        if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
            peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
        } else {
            int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;

            peripheral.rx_len = (xfer->len / bytes_per_word);
        }
    }

    peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
    peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
    peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
    peripheral.cs = spi_slv->chip_select;
    peripheral.pack_en = true;
    peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;

    ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
                  &peripheral.clk_src, &peripheral.clk_div);
    if (ret) {
        dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
        return ret;
    }

    if (!xfer->cs_change) {
        if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
            peripheral.fragmentation = FRAGMENTATION;
    }

    if (peripheral.cmd & SPI_RX) {
        dmaengine_slave_config(mas->rx, &config);
        rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
                          DMA_DEV_TO_MEM, flags);
        if (!rx_desc) {
            dev_err(mas->dev, "Err setting up rx desc\n");
            return -EIO;
        }
    }

    /*
     * Always prepare the TX descriptor: even for RX-only transfers
     * (tx_buf being NULL), the GSI spec requires TX to be set up.
     */
    dmaengine_slave_config(mas->tx, &config);
    tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
                      DMA_MEM_TO_DEV, flags);
    if (!tx_desc) {
        dev_err(mas->dev, "Err setting up tx desc\n");
        return -EIO;
    }

    tx_desc->callback_result = spi_gsi_callback_result;
    tx_desc->callback_param = spi;

    if (peripheral.cmd & SPI_RX)
        dmaengine_submit(rx_desc);
    dmaengine_submit(tx_desc);

    if (peripheral.cmd & SPI_RX)
        dma_async_issue_pending(mas->rx);

    dma_async_issue_pending(mas->tx);
    return 1;
}

static bool geni_can_dma(struct spi_controller *ctlr,
             struct spi_device *slv, struct spi_transfer *xfer)
{
    struct spi_geni_master *mas = spi_master_get_devdata(slv->master);

    /* check if dma is supported */
    return mas->cur_xfer_mode != GENI_SE_FIFO;
}

static int spi_geni_prepare_message(struct spi_master *spi,
                    struct spi_message *spi_msg)
{
    struct spi_geni_master *mas = spi_master_get_devdata(spi);
    int ret;

    switch (mas->cur_xfer_mode) {
    case GENI_SE_FIFO:
        if (spi_geni_is_abort_still_pending(mas))
            return -EBUSY;
        ret = setup_fifo_params(spi_msg->spi, spi);
        if (ret)
            dev_err(mas->dev, "Couldn't select mode %d\n", ret);
        return ret;

    case GENI_GPI_DMA:
        /* nothing to do for GPI DMA */
        return 0;
    }

    dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
    return -EINVAL;
}

static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
{
    int ret;

    mas->tx = dma_request_chan(mas->dev, "tx");
    if (IS_ERR(mas->tx)) {
        ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
                    "Failed to get tx DMA ch\n");
        goto err_tx;
    }

    mas->rx = dma_request_chan(mas->dev, "rx");
    if (IS_ERR(mas->rx)) {
        ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
                    "Failed to get rx DMA ch\n");
        goto err_rx;
    }

    return 0;

err_rx:
    mas->rx = NULL;
    dma_release_channel(mas->tx);
err_tx:
    mas->tx = NULL;
    return ret;
}

static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
{
    if (mas->rx) {
        dma_release_channel(mas->rx);
        mas->rx = NULL;
    }

    if (mas->tx) {
        dma_release_channel(mas->tx);
        mas->tx = NULL;
    }
}

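/*
 * One-time hardware setup at probe: check that the serial engine firmware
 * is configured for SPI, read the FIFO geometry, and pick the transfer
 * mode. If the FIFO interface is disabled in hardware (GENI_IF_DISABLE_RO)
 * we use GPI DMA, falling back to FIFO mode (with a warning) if no DMA
 * channels can be acquired.
 */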
static int spi_geni_init(struct spi_geni_master *mas)
{
    struct geni_se *se = &mas->se;
    unsigned int proto, major, minor, ver;
    u32 spi_tx_cfg, fifo_disable;
    int ret = -ENXIO;

    pm_runtime_get_sync(mas->dev);

    proto = geni_se_read_proto(se);
    if (proto != GENI_SE_SPI) {
        dev_err(mas->dev, "Invalid proto %d\n", proto);
        goto out_pm;
    }
    mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);

    /* The Tx and Rx FIFOs have the same width */
    mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);

    /*
     * The hardware programming guide suggests configuring the
     * RX FIFO RFR level to fifo_depth - 2.
     */
    geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
    /* Transmit an entire FIFO worth of data per IRQ */
    mas->tx_wm = 1;
    ver = geni_se_get_qup_hw_version(se);
    major = GENI_SE_VERSION_MAJOR(ver);
    minor = GENI_SE_VERSION_MINOR(ver);

    if (major == 1 && minor == 0)
        mas->oversampling = 2;
    else
        mas->oversampling = 1;

    fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
    switch (fifo_disable) {
    case 1:
        ret = spi_geni_grab_gpi_chan(mas);
        if (!ret) { /* success case */
            mas->cur_xfer_mode = GENI_GPI_DMA;
            geni_se_select_mode(se, GENI_GPI_DMA);
            dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
            break;
        }
        /*
         * If we fail to get a DMA channel we can still use
         * FIFO mode, so fall through.
         */
        dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
        fallthrough;

    case 0:
        mas->cur_xfer_mode = GENI_SE_FIFO;
        geni_se_select_mode(se, GENI_SE_FIFO);
        ret = 0;
        break;
    }

    /* We always control CS manually */
    spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
    spi_tx_cfg &= ~CS_TOGGLE;
    writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);

out_pm:
    pm_runtime_put(mas->dev);
    return ret;
}

static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
    /*
     * Calculate how many bytes we'll put in each FIFO word.  If the
     * transfer words don't pack cleanly into a FIFO word we'll just put
     * one transfer word in each FIFO word.  If they do pack we'll pack 'em.
     */
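    /*
     * For example: 8 bits_per_word in a 32-bit FIFO packs 4 bytes per
     * FIFO word, while 9 bits_per_word doesn't pack and rounds up to
     * 2 bytes (roundup_pow_of_two(DIV_ROUND_UP(9, 8))).
     */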
    if (mas->fifo_width_bits % mas->cur_bits_per_word)
        return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
                               BITS_PER_BYTE));

    return mas->fifo_width_bits / BITS_PER_BYTE;
}

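/*
 * Fill the TX FIFO from the current transfer. Returns true if bytes
 * remain and the watermark interrupt should stay armed, false once the
 * transfer's TX side is done (or there's no transfer) and the watermark
 * has been disarmed.
 */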
static bool geni_spi_handle_tx(struct spi_geni_master *mas)
{
    struct geni_se *se = &mas->se;
    unsigned int max_bytes;
    const u8 *tx_buf;
    unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
    unsigned int i = 0;

    /* Stop the watermark IRQ if nothing to send */
    if (!mas->cur_xfer) {
        writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
        return false;
    }

    max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
    if (mas->tx_rem_bytes < max_bytes)
        max_bytes = mas->tx_rem_bytes;

    tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
    while (i < max_bytes) {
        unsigned int j;
        unsigned int bytes_to_write;
        u32 fifo_word = 0;
        u8 *fifo_byte = (u8 *)&fifo_word;

        bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
        for (j = 0; j < bytes_to_write; j++)
            fifo_byte[j] = tx_buf[i++];
        iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
    }
    mas->tx_rem_bytes -= max_bytes;
    if (!mas->tx_rem_bytes) {
        writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
        return false;
    }
    return true;
}

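/*
 * Drain the RX FIFO into the current transfer. The word count comes from
 * SE_GENI_RX_FIFO_STATUS; on the final FIFO word (RX_LAST) only
 * RX_LAST_BYTE_VALID bytes are meaningful, so the byte count is trimmed
 * accordingly. With no transfer outstanding the FIFO is simply flushed.
 */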
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
    struct geni_se *se = &mas->se;
    u32 rx_fifo_status;
    unsigned int rx_bytes;
    unsigned int rx_last_byte_valid;
    u8 *rx_buf;
    unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
    unsigned int i = 0;

    rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
    rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
    if (rx_fifo_status & RX_LAST) {
        rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
        rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
        if (rx_last_byte_valid && rx_last_byte_valid < 4)
            rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
    }

    /* Clear out the FIFO and bail if nowhere to put it */
    if (!mas->cur_xfer) {
        for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
            readl(se->base + SE_GENI_RX_FIFOn);
        return;
    }

    if (mas->rx_rem_bytes < rx_bytes)
        rx_bytes = mas->rx_rem_bytes;

    rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
    while (i < rx_bytes) {
        u32 fifo_word = 0;
        u8 *fifo_byte = (u8 *)&fifo_word;
        unsigned int bytes_to_read;
        unsigned int j;

        bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
        ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
        for (j = 0; j < bytes_to_read; j++)
            rx_buf[i++] = fifo_byte[j];
    }
    mas->rx_rem_bytes -= rx_bytes;
}

static void setup_fifo_xfer(struct spi_transfer *xfer,
                struct spi_geni_master *mas,
                u16 mode, struct spi_master *spi)
{
    u32 m_cmd = 0;
    u32 len;
    struct geni_se *se = &mas->se;
    int ret;

    /*
     * Ensure that our interrupt handler isn't still running from some
     * prior command before we start messing with the hardware behind
     * its back.  We don't need to _keep_ the lock here since we're only
     * worried about racing with our interrupt handler.  The SPI core
     * already handles making sure that we're not trying to do two
     * transfers at once or setting a chip select and doing a transfer
     * concurrently.
     *
     * NOTE: we actually _can't_ hold the lock here because possibly we
     * might call clk_set_rate() which needs to be able to sleep.
     */
    spin_lock_irq(&mas->lock);
    spin_unlock_irq(&mas->lock);

    if (xfer->bits_per_word != mas->cur_bits_per_word) {
        spi_setup_word_len(mas, mode, xfer->bits_per_word);
        mas->cur_bits_per_word = xfer->bits_per_word;
    }

    /* Speed and bits per word can be overridden per transfer */
    ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
    if (ret)
        return;

    mas->tx_rem_bytes = 0;
    mas->rx_rem_bytes = 0;

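    /*
     * The transfer length registers count SPI words, not bytes. For
     * bits_per_word that's a multiple of 4 the conversion is exact;
     * otherwise each SPI word occupies bits_per_word / 8 + 1 bytes.
     */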
    if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
        len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
    else
        len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
    len &= TRANS_LEN_MSK;

    mas->cur_xfer = xfer;
    if (xfer->tx_buf) {
        m_cmd |= SPI_TX_ONLY;
        mas->tx_rem_bytes = xfer->len;
        writel(len, se->base + SE_SPI_TX_TRANS_LEN);
    }

    if (xfer->rx_buf) {
        m_cmd |= SPI_RX_ONLY;
        writel(len, se->base + SE_SPI_RX_TRANS_LEN);
        mas->rx_rem_bytes = xfer->len;
    }

    /*
     * Lock around right before we start the transfer since our
     * interrupt could come in at any time now.
     */
    spin_lock_irq(&mas->lock);
    geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
    if (m_cmd & SPI_TX_ONLY) {
        if (geni_spi_handle_tx(mas))
            writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
    }
    spin_unlock_irq(&mas->lock);
}

static int spi_geni_transfer_one(struct spi_master *spi,
                struct spi_device *slv,
                struct spi_transfer *xfer)
{
    struct spi_geni_master *mas = spi_master_get_devdata(spi);

    if (spi_geni_is_abort_still_pending(mas))
        return -EBUSY;

    /* Terminate and return success for a zero-length transfer */
    if (!xfer->len)
        return 0;

    if (mas->cur_xfer_mode == GENI_SE_FIFO) {
        setup_fifo_xfer(xfer, mas, slv->mode, spi);
        return 1;
    }
    return setup_gsi_xfer(xfer, mas, slv, spi);
}

static irqreturn_t geni_spi_isr(int irq, void *data)
{
    struct spi_master *spi = data;
    struct spi_geni_master *mas = spi_master_get_devdata(spi);
    struct geni_se *se = &mas->se;
    u32 m_irq;

    m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
    if (!m_irq)
        return IRQ_NONE;

    if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |
             M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN |
             M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN))
        dev_warn(mas->dev, "Unexpected IRQ err status %#010x\n", m_irq);

    spin_lock(&mas->lock);

    if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
        geni_spi_handle_rx(mas);

    if (m_irq & M_TX_FIFO_WATERMARK_EN)
        geni_spi_handle_tx(mas);

    if (m_irq & M_CMD_DONE_EN) {
        if (mas->cur_xfer) {
            spi_finalize_current_transfer(spi);
            mas->cur_xfer = NULL;
            /*
             * If this happens, then a CMD_DONE came before all the
             * Tx buffer bytes were sent out. This is unusual, log
             * this condition and disable the WM interrupt to
             * prevent the system from stalling due to an interrupt
             * storm.
             *
             * If this happens when all Rx bytes haven't been
             * received, log the condition. The only known time
             * this can happen is if bits_per_word != 8 and some
             * registers that expect xfer lengths in num spi_words
             * weren't written correctly.
             */
            if (mas->tx_rem_bytes) {
                writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
                dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
                    mas->tx_rem_bytes, mas->cur_bits_per_word);
            }
            if (mas->rx_rem_bytes)
                dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
                    mas->rx_rem_bytes, mas->cur_bits_per_word);
        } else {
            complete(&mas->cs_done);
        }
    }

    if (m_irq & M_CMD_CANCEL_EN)
        complete(&mas->cancel_done);
    if (m_irq & M_CMD_ABORT_EN)
        complete(&mas->abort_done);

    /*
     * It's safe (and a good idea) to ack all of our interrupts at the end
     * of the function. Specifically:
     * - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
     *   clearing Acks. Clearing at the end relies on nobody else having
     *   started a new transfer yet or else we could be clearing _their_
     *   done bit, but everyone grabs the spinlock before starting a new
     *   transfer.
     * - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
     *   to be "latched level" interrupts so it's important to clear them
     *   _after_ you've handled the condition and always safe to do so
     *   since they'll re-assert if they're still happening.
     */
    writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);

    spin_unlock(&mas->lock);

    return IRQ_HANDLED;
}

static int spi_geni_probe(struct platform_device *pdev)
{
    int ret, irq;
    struct spi_master *spi;
    struct spi_geni_master *mas;
    void __iomem *base;
    struct clk *clk;
    struct device *dev = &pdev->dev;

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
        return irq;

    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
    if (ret)
        return dev_err_probe(dev, ret, "could not set DMA mask\n");

    base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(base))
        return PTR_ERR(base);

    clk = devm_clk_get(dev, "se");
    if (IS_ERR(clk))
        return PTR_ERR(clk);

    spi = devm_spi_alloc_master(dev, sizeof(*mas));
    if (!spi)
        return -ENOMEM;

    platform_set_drvdata(pdev, spi);
    mas = spi_master_get_devdata(spi);
    mas->irq = irq;
    mas->dev = dev;
    mas->se.dev = dev;
    mas->se.wrapper = dev_get_drvdata(dev->parent);
    mas->se.base = base;
    mas->se.clk = clk;

    ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
    if (ret)
        return ret;
    /* OPP table is optional */
    ret = devm_pm_opp_of_add_table(&pdev->dev);
    if (ret && ret != -ENODEV) {
        dev_err(&pdev->dev, "invalid OPP table in device tree\n");
        return ret;
    }

    spi->bus_num = -1;
    spi->dev.of_node = dev->of_node;
    spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
    spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
    spi->num_chipselect = 4;
    spi->max_speed_hz = 50000000;
    spi->prepare_message = spi_geni_prepare_message;
    spi->transfer_one = spi_geni_transfer_one;
    spi->can_dma = geni_can_dma;
    spi->dma_map_dev = dev->parent;
    spi->auto_runtime_pm = true;
    spi->handle_err = spi_geni_handle_err;
    spi->use_gpio_descriptors = true;

    init_completion(&mas->cs_done);
    init_completion(&mas->cancel_done);
    init_completion(&mas->abort_done);
    spin_lock_init(&mas->lock);
    pm_runtime_use_autosuspend(&pdev->dev);
    pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
    pm_runtime_enable(dev);

    ret = geni_icc_get(&mas->se, NULL);
    if (ret)
        goto spi_geni_probe_runtime_disable;
    /* Set the bus quota to a reasonable value for register access */
    mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
    mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;

    ret = geni_icc_set_bw(&mas->se);
    if (ret)
        goto spi_geni_probe_runtime_disable;

    ret = spi_geni_init(mas);
    if (ret)
        goto spi_geni_probe_runtime_disable;

    /*
     * Install set_cs() for FIFO mode only. In DMA (GSI) mode the GSI
     * hardware asserts CS based on the params passed in the TRE.
     */
    if (mas->cur_xfer_mode == GENI_SE_FIFO)
        spi->set_cs = spi_geni_set_cs;

    ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
    if (ret)
        goto spi_geni_release_dma;

    ret = spi_register_master(spi);
    if (ret)
        goto spi_geni_probe_free_irq;

    return 0;
spi_geni_probe_free_irq:
    free_irq(mas->irq, spi);
spi_geni_release_dma:
    spi_geni_release_dma_chan(mas);
spi_geni_probe_runtime_disable:
    pm_runtime_disable(dev);
    return ret;
}

static int spi_geni_remove(struct platform_device *pdev)
{
    struct spi_master *spi = platform_get_drvdata(pdev);
    struct spi_geni_master *mas = spi_master_get_devdata(spi);

    /* Unregister _before_ disabling pm_runtime() so we stop transfers */
    spi_unregister_master(spi);

    spi_geni_release_dma_chan(mas);

    free_irq(mas->irq, spi);
    pm_runtime_disable(&pdev->dev);
    return 0;
}

static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
    struct spi_master *spi = dev_get_drvdata(dev);
    struct spi_geni_master *mas = spi_master_get_devdata(spi);
    int ret;

    /* Drop the performance state vote */
    dev_pm_opp_set_rate(dev, 0);

    ret = geni_se_resources_off(&mas->se);
    if (ret)
        return ret;

    return geni_icc_disable(&mas->se);
}

static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
    struct spi_master *spi = dev_get_drvdata(dev);
    struct spi_geni_master *mas = spi_master_get_devdata(spi);
    int ret;

    ret = geni_icc_enable(&mas->se);
    if (ret)
        return ret;

    ret = geni_se_resources_on(&mas->se);
    if (ret)
        return ret;

    return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
}

static int __maybe_unused spi_geni_suspend(struct device *dev)
{
    struct spi_master *spi = dev_get_drvdata(dev);
    int ret;

    ret = spi_master_suspend(spi);
    if (ret)
        return ret;

    ret = pm_runtime_force_suspend(dev);
    if (ret)
        spi_master_resume(spi);

    return ret;
}

static int __maybe_unused spi_geni_resume(struct device *dev)
{
    struct spi_master *spi = dev_get_drvdata(dev);
    int ret;

    ret = pm_runtime_force_resume(dev);
    if (ret)
        return ret;

    ret = spi_master_resume(spi);
    if (ret)
        pm_runtime_force_suspend(dev);

    return ret;
}

static const struct dev_pm_ops spi_geni_pm_ops = {
    SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
                    spi_geni_runtime_resume, NULL)
    SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
    { .compatible = "qcom,geni-spi" },
    {}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);

static struct platform_driver spi_geni_driver = {
    .probe  = spi_geni_probe,
    .remove = spi_geni_remove,
    .driver = {
        .name = "geni_spi",
        .pm = &spi_geni_pm_ops,
        .of_match_table = spi_geni_dt_match,
    },
};
module_platform_driver(spi_geni_driver);

MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");