// SPDX-License-Identifier: GPL-2.0-only
/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, 2021 Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>

#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>

#include "spi-pxa2xx.h"

static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                                             bool error)
{
    struct spi_message *msg = drv_data->controller->cur_msg;

    /*
     * It is possible that one CPU is handling the ROR interrupt while
     * another just gets the DMA completion. Calling pump_transfers()
     * twice for the same transfer leads to problems, thus we prevent
     * concurrent calls by using dma_running.
     */
    if (atomic_dec_and_test(&drv_data->dma_running)) {
        /*
         * If the other CPU is still handling the ROR interrupt we
         * might not know about the error yet. So we re-check the
         * ROR bit here before we clear the status register.
         */
        if (!error)
            error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;

        /* Clear status & disable interrupts */
        clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        if (!pxa25x_ssp_comp(drv_data))
            pxa2xx_spi_write(drv_data, SSTO, 0);

        if (error) {
            /* In case we got an error we disable the SSP now */
            pxa_ssp_disable(drv_data->ssp);
            msg->status = -EIO;
        }

        spi_finalize_current_transfer(drv_data->controller);
    }
}
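
/*
 * Example (illustrative sketch, not part of the driver): dma_running is
 * set to 1 in pxa2xx_spi_dma_start() below, and both the DMA callback
 * and the ROR interrupt path funnel into the function above, so exactly
 * one of the racing completion paths finalizes the transfer:
 *
 *     atomic_set(&drv_data->dma_running, 1);   // at DMA start
 *     ...
 *     // first completion path:  1 -> 0,  test is true, finalize
 *     // second completion path: 0 -> -1, test is false, no-op
 *     if (atomic_dec_and_test(&drv_data->dma_running))
 *         spi_finalize_current_transfer(drv_data->controller);
 */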

static void pxa2xx_spi_dma_callback(void *data)
{
    pxa2xx_spi_dma_transfer_complete(data, false);
}

static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
                           enum dma_transfer_direction dir,
                           struct spi_transfer *xfer)
{
    struct chip_data *chip =
        spi_get_ctldata(drv_data->controller->cur_msg->spi);
    enum dma_slave_buswidth width;
    struct dma_slave_config cfg;
    struct dma_chan *chan;
    struct sg_table *sgt;
    int ret;

    switch (drv_data->n_bytes) {
    case 1:
        width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        break;
    case 2:
        width = DMA_SLAVE_BUSWIDTH_2_BYTES;
        break;
    default:
        width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        break;
    }

    memset(&cfg, 0, sizeof(cfg));
    cfg.direction = dir;

    if (dir == DMA_MEM_TO_DEV) {
        cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
        cfg.dst_addr_width = width;
        cfg.dst_maxburst = chip->dma_burst_size;

        sgt = &xfer->tx_sg;
        chan = drv_data->controller->dma_tx;
    } else {
        cfg.src_addr = drv_data->ssp->phys_base + SSDR;
        cfg.src_addr_width = width;
        cfg.src_maxburst = chip->dma_burst_size;

        sgt = &xfer->rx_sg;
        chan = drv_data->controller->dma_rx;
    }

    ret = dmaengine_slave_config(chan, &cfg);
    if (ret) {
        dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
        return NULL;
    }

    return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
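
/*
 * Example (minimal sketch of the dmaengine slave flow that
 * pxa2xx_spi_dma_prepare_one() wraps, assuming the TX, memory-to-device
 * direction; fifo_phys_addr and the burst of 8 are illustrative only):
 *
 *     struct dma_slave_config cfg = {
 *         .direction      = DMA_MEM_TO_DEV,
 *         .dst_addr       = fifo_phys_addr,   // SSDR in this driver
 *         .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *         .dst_maxburst   = 8,
 *     };
 *
 *     if (dmaengine_slave_config(chan, &cfg))
 *         return NULL;
 *     desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *                                    DMA_MEM_TO_DEV,
 *                                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 */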

irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
    u32 status;

    status = read_SSSR_bits(drv_data, drv_data->mask_sr);
    if (status & SSSR_ROR) {
        dev_err(drv_data->ssp->dev, "FIFO overrun\n");

        dmaengine_terminate_async(drv_data->controller->dma_rx);
        dmaengine_terminate_async(drv_data->controller->dma_tx);

        pxa2xx_spi_dma_transfer_complete(drv_data, true);
        return IRQ_HANDLED;
    }

    return IRQ_NONE;
}

int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
                           struct spi_transfer *xfer)
{
    struct dma_async_tx_descriptor *tx_desc, *rx_desc;
    int err;

    tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
    if (!tx_desc) {
        dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
        err = -EBUSY;
        goto err_tx;
    }

    rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
    if (!rx_desc) {
        dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
        err = -EBUSY;
        goto err_rx;
    }

    /* We are ready when RX completes */
    rx_desc->callback = pxa2xx_spi_dma_callback;
    rx_desc->callback_param = drv_data;

    dmaengine_submit(rx_desc);
    dmaengine_submit(tx_desc);
    return 0;

err_rx:
    dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
    return err;
}
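
/*
 * Example (rough, illustrative sketch of how these hooks fit together
 * for one transfer; the real call sites live elsewhere in spi-pxa2xx.c):
 *
 *     err = pxa2xx_spi_dma_prepare(drv_data, xfer);  // submit RX + TX
 *     if (err)
 *         return err;
 *     pxa2xx_spi_dma_start(drv_data);  // issue pending, dma_running = 1
 *     // ... later, either the RX callback or the ROR interrupt (via
 *     // pxa2xx_spi_dma_transfer()) completes the transfer
 */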

void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
    dma_async_issue_pending(drv_data->controller->dma_rx);
    dma_async_issue_pending(drv_data->controller->dma_tx);

    atomic_set(&drv_data->dma_running, 1);
}

void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
    atomic_set(&drv_data->dma_running, 0);
    dmaengine_terminate_sync(drv_data->controller->dma_rx);
    dmaengine_terminate_sync(drv_data->controller->dma_tx);
}

int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
    struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
    struct spi_controller *controller = drv_data->controller;
    struct device *dev = drv_data->ssp->dev;
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    controller->dma_tx = dma_request_slave_channel_compat(mask,
                pdata->dma_filter, pdata->tx_param, dev, "tx");
    if (!controller->dma_tx)
        return -ENODEV;

    controller->dma_rx = dma_request_slave_channel_compat(mask,
                pdata->dma_filter, pdata->rx_param, dev, "rx");
    if (!controller->dma_rx) {
        dma_release_channel(controller->dma_tx);
        controller->dma_tx = NULL;
        return -ENODEV;
    }

    return 0;
}
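
/*
 * Note: dma_request_slave_channel_compat() first tries a named DT/ACPI
 * lookup ("tx"/"rx") for the given device and only then falls back to a
 * filter-based channel request using the platform data. A hand-written
 * equivalent of that fallback might look roughly like this (sketch only):
 *
 *     chan = dma_request_chan(dev, "tx");
 *     if (IS_ERR(chan))
 *         chan = dma_request_channel(mask, pdata->dma_filter,
 *                                    pdata->tx_param);
 */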

void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
    struct spi_controller *controller = drv_data->controller;

    if (controller->dma_rx) {
        dmaengine_terminate_sync(controller->dma_rx);
        dma_release_channel(controller->dma_rx);
        controller->dma_rx = NULL;
    }
    if (controller->dma_tx) {
        dmaengine_terminate_sync(controller->dma_tx);
        dma_release_channel(controller->dma_tx);
        controller->dma_tx = NULL;
    }
}

int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
                                           struct spi_device *spi,
                                           u8 bits_per_word, u32 *burst_code,
                                           u32 *threshold)
{
    struct pxa2xx_spi_chip *chip_info = spi->controller_data;
    struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
    u32 dma_burst_size = drv_data->controller_info->dma_burst_size;

    /*
     * If the DMA burst size is given in chip_info, we use that;
     * otherwise we use the default. We also use the default FIFO
     * thresholds for now.
     */
    *burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
    *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
           | SSCR1_TxTresh(TX_THRESH_DFLT);

    return 0;
}
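
/*
 * Example (usage sketch, assuming a caller in the chip setup path and a
 * chip_data threshold field as used elsewhere in this driver; the
 * surrounding code is illustrative only):
 *
 *     u32 burst, thresh;
 *
 *     if (!pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
 *                                                 spi->bits_per_word,
 *                                                 &burst, &thresh)) {
 *         chip->dma_burst_size = burst;   // per-transfer DMA burst
 *         chip->threshold = thresh;       // later programmed into SSCR1
 *     }
 */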