Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Driver for Cirrus Logic EP93xx SPI controller.
0004  *
0005  * Copyright (C) 2010-2011 Mika Westerberg
0006  *
0007  * Explicit FIFO handling code was inspired by amba-pl022 driver.
0008  *
0009  * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
0010  *
0011  * For more information about the SPI controller see documentation on Cirrus
0012  * Logic web site:
0013  *     https://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
0014  */
0015 
0016 #include <linux/io.h>
0017 #include <linux/clk.h>
0018 #include <linux/err.h>
0019 #include <linux/delay.h>
0020 #include <linux/device.h>
0021 #include <linux/dmaengine.h>
0022 #include <linux/bitops.h>
0023 #include <linux/interrupt.h>
0024 #include <linux/module.h>
0025 #include <linux/platform_device.h>
0026 #include <linux/sched.h>
0027 #include <linux/scatterlist.h>
0028 #include <linux/spi/spi.h>
0029 
0030 #include <linux/platform_data/dma-ep93xx.h>
0031 #include <linux/platform_data/spi-ep93xx.h>
0032 
0033 #define SSPCR0          0x0000
0034 #define SSPCR0_SPO      BIT(6)
0035 #define SSPCR0_SPH      BIT(7)
0036 #define SSPCR0_SCR_SHIFT    8
0037 
0038 #define SSPCR1          0x0004
0039 #define SSPCR1_RIE      BIT(0)
0040 #define SSPCR1_TIE      BIT(1)
0041 #define SSPCR1_RORIE        BIT(2)
0042 #define SSPCR1_LBM      BIT(3)
0043 #define SSPCR1_SSE      BIT(4)
0044 #define SSPCR1_MS       BIT(5)
0045 #define SSPCR1_SOD      BIT(6)
0046 
0047 #define SSPDR           0x0008
0048 
0049 #define SSPSR           0x000c
0050 #define SSPSR_TFE       BIT(0)
0051 #define SSPSR_TNF       BIT(1)
0052 #define SSPSR_RNE       BIT(2)
0053 #define SSPSR_RFF       BIT(3)
0054 #define SSPSR_BSY       BIT(4)
0055 #define SSPCPSR         0x0010
0056 
0057 #define SSPIIR          0x0014
0058 #define SSPIIR_RIS      BIT(0)
0059 #define SSPIIR_TIS      BIT(1)
0060 #define SSPIIR_RORIS        BIT(2)
0061 #define SSPICR          SSPIIR
0062 
0063 /* timeout in milliseconds */
0064 #define SPI_TIMEOUT     5
0065 /* maximum depth of RX/TX FIFO */
0066 #define SPI_FIFO_SIZE       8
0067 
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @clk: clock for the controller
 * @mmio: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register (used as the DMA
 *              source/destination address)
 * @tx: current byte offset (not frame index) into the transfer to transmit;
 *      advanced by 2 per frame when bits_per_word > 8, else by 1
 * @rx: current byte offset (not frame index) into the transfer to receive;
 *      advanced the same way as @tx
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers (re-used across transfers when the
 *          number of entries does not change)
 * @tx_sgt: sg table for TX transfers (same re-use policy as @rx_sgt)
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 */
struct ep93xx_spi {
	struct clk			*clk;
	void __iomem			*mmio;
	unsigned long			sspdr_phys;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};
0101 
0102 /* converts bits per word to CR0.DSS value */
0103 #define bits_per_word_to_dss(bpw)   ((bpw) - 1)
0104 
0105 /**
0106  * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
0107  * @master: SPI master
0108  * @rate: desired SPI output clock rate
0109  * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
0110  * @div_scr: pointer to return the scr divider
0111  */
0112 static int ep93xx_spi_calc_divisors(struct spi_master *master,
0113                     u32 rate, u8 *div_cpsr, u8 *div_scr)
0114 {
0115     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0116     unsigned long spi_clk_rate = clk_get_rate(espi->clk);
0117     int cpsr, scr;
0118 
0119     /*
0120      * Make sure that max value is between values supported by the
0121      * controller.
0122      */
0123     rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
0124 
0125     /*
0126      * Calculate divisors so that we can get speed according the
0127      * following formula:
0128      *  rate = spi_clock_rate / (cpsr * (1 + scr))
0129      *
0130      * cpsr must be even number and starts from 2, scr can be any number
0131      * between 0 and 255.
0132      */
0133     for (cpsr = 2; cpsr <= 254; cpsr += 2) {
0134         for (scr = 0; scr <= 255; scr++) {
0135             if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
0136                 *div_scr = (u8)scr;
0137                 *div_cpsr = (u8)cpsr;
0138                 return 0;
0139             }
0140         }
0141     }
0142 
0143     return -EINVAL;
0144 }
0145 
0146 static int ep93xx_spi_chip_setup(struct spi_master *master,
0147                  struct spi_device *spi,
0148                  struct spi_transfer *xfer)
0149 {
0150     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0151     u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
0152     u8 div_cpsr = 0;
0153     u8 div_scr = 0;
0154     u16 cr0;
0155     int err;
0156 
0157     err = ep93xx_spi_calc_divisors(master, xfer->speed_hz,
0158                        &div_cpsr, &div_scr);
0159     if (err)
0160         return err;
0161 
0162     cr0 = div_scr << SSPCR0_SCR_SHIFT;
0163     if (spi->mode & SPI_CPOL)
0164         cr0 |= SSPCR0_SPO;
0165     if (spi->mode & SPI_CPHA)
0166         cr0 |= SSPCR0_SPH;
0167     cr0 |= dss;
0168 
0169     dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
0170         spi->mode, div_cpsr, div_scr, dss);
0171     dev_dbg(&master->dev, "setup: cr0 %#x\n", cr0);
0172 
0173     writel(div_cpsr, espi->mmio + SSPCPSR);
0174     writel(cr0, espi->mmio + SSPCR0);
0175 
0176     return 0;
0177 }
0178 
0179 static void ep93xx_do_write(struct spi_master *master)
0180 {
0181     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0182     struct spi_transfer *xfer = master->cur_msg->state;
0183     u32 val = 0;
0184 
0185     if (xfer->bits_per_word > 8) {
0186         if (xfer->tx_buf)
0187             val = ((u16 *)xfer->tx_buf)[espi->tx];
0188         espi->tx += 2;
0189     } else {
0190         if (xfer->tx_buf)
0191             val = ((u8 *)xfer->tx_buf)[espi->tx];
0192         espi->tx += 1;
0193     }
0194     writel(val, espi->mmio + SSPDR);
0195 }
0196 
0197 static void ep93xx_do_read(struct spi_master *master)
0198 {
0199     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0200     struct spi_transfer *xfer = master->cur_msg->state;
0201     u32 val;
0202 
0203     val = readl(espi->mmio + SSPDR);
0204     if (xfer->bits_per_word > 8) {
0205         if (xfer->rx_buf)
0206             ((u16 *)xfer->rx_buf)[espi->rx] = val;
0207         espi->rx += 2;
0208     } else {
0209         if (xfer->rx_buf)
0210             ((u8 *)xfer->rx_buf)[espi->rx] = val;
0211         espi->rx += 1;
0212     }
0213 }
0214 
0215 /**
0216  * ep93xx_spi_read_write() - perform next RX/TX transfer
0217  * @master: SPI master
0218  *
0219  * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
0220  * called several times, the whole transfer will be completed. Returns
0221  * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
0222  *
0223  * When this function is finished, RX FIFO should be empty and TX FIFO should be
0224  * full.
0225  */
0226 static int ep93xx_spi_read_write(struct spi_master *master)
0227 {
0228     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0229     struct spi_transfer *xfer = master->cur_msg->state;
0230 
0231     /* read as long as RX FIFO has frames in it */
0232     while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
0233         ep93xx_do_read(master);
0234         espi->fifo_level--;
0235     }
0236 
0237     /* write as long as TX FIFO has room */
0238     while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) {
0239         ep93xx_do_write(master);
0240         espi->fifo_level++;
0241     }
0242 
0243     if (espi->rx == xfer->len)
0244         return 0;
0245 
0246     return -EINPROGRESS;
0247 }
0248 
0249 static enum dma_transfer_direction
0250 ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
0251 {
0252     switch (dir) {
0253     case DMA_TO_DEVICE:
0254         return DMA_MEM_TO_DEV;
0255     case DMA_FROM_DEVICE:
0256         return DMA_DEV_TO_MEM;
0257     default:
0258         return DMA_TRANS_NONE;
0259     }
0260 }
0261 
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @master: SPI master
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct spi_master *master,
		       enum dma_data_direction dir)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *xfer = master->cur_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = xfer->len;
	int i, ret, nents;

	/* Frames wider than 8 bits occupy two bytes each on the bus. */
	if (xfer->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = ep93xx_dma_data_to_trans_dir(dir);

	/* Both directions use the same data register (SSPDR) as FIFO port. */
	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		buf = xfer->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = xfer->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	/*
	 * Populate the sg list chunk by chunk. A NULL @buf means the client
	 * supplied no buffer for this direction, so every chunk is backed by
	 * the shared zero page instead.
	 */
	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	/* The chunks must account for the entire transfer length. */
	if (WARN_ON(len)) {
		dev_warn(&master->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	/* Note: prep uses the (possibly coalesced) mapped nents count. */
	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
				      DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
0367 
0368 /**
0369  * ep93xx_spi_dma_finish() - finishes with a DMA transfer
0370  * @master: SPI master
0371  * @dir: DMA transfer direction
0372  *
0373  * Function finishes with the DMA transfer. After this, the DMA buffer is
0374  * unmapped.
0375  */
0376 static void ep93xx_spi_dma_finish(struct spi_master *master,
0377                   enum dma_data_direction dir)
0378 {
0379     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0380     struct dma_chan *chan;
0381     struct sg_table *sgt;
0382 
0383     if (dir == DMA_FROM_DEVICE) {
0384         chan = espi->dma_rx;
0385         sgt = &espi->rx_sgt;
0386     } else {
0387         chan = espi->dma_tx;
0388         sgt = &espi->tx_sgt;
0389     }
0390 
0391     dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
0392 }
0393 
0394 static void ep93xx_spi_dma_callback(void *callback_param)
0395 {
0396     struct spi_master *master = callback_param;
0397 
0398     ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
0399     ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
0400 
0401     spi_finalize_current_transfer(master);
0402 }
0403 
/*
 * ep93xx_spi_dma_transfer() - run the current transfer over DMA
 *
 * Prepares one descriptor per direction, hooks the completion callback to
 * the RX descriptor, submits both and kicks the channels. Returns 1 so the
 * SPI core waits for spi_finalize_current_transfer(), or a negative errno
 * if descriptor preparation failed.
 */
static int ep93xx_spi_dma_transfer(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
	if (IS_ERR(rxd)) {
		dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		return PTR_ERR(rxd);
	}

	txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
	if (IS_ERR(txd)) {
		/* The RX side was already mapped; undo it before bailing. */
		ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
		dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		return PTR_ERR(txd);
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = master;

	/* Now submit both descriptors and start DMA */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	/* signal that we need to wait for completion */
	return 1;
}
0436 
/*
 * PIO-mode interrupt handler. On receive overrun the message is aborted;
 * on RX/TX interrupts the FIFOs are serviced. When the transfer is still
 * in progress the handler simply returns and waits for the next interrupt;
 * otherwise it masks the SSP interrupts and completes the transfer.
 */
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) {
		/* clear the overrun interrupt */
		writel(0, espi->mmio + SSPICR);
		dev_warn(&master->dev,
			 "receive overrun, aborting the message\n");
		master->cur_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(master)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	val = readl(espi->mmio + SSPCR1);
	val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	writel(val, espi->mmio + SSPCR1);

	spi_finalize_current_transfer(master);

	return IRQ_HANDLED;
}
0481 
0482 static int ep93xx_spi_transfer_one(struct spi_master *master,
0483                    struct spi_device *spi,
0484                    struct spi_transfer *xfer)
0485 {
0486     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0487     u32 val;
0488     int ret;
0489 
0490     ret = ep93xx_spi_chip_setup(master, spi, xfer);
0491     if (ret) {
0492         dev_err(&master->dev, "failed to setup chip for transfer\n");
0493         return ret;
0494     }
0495 
0496     master->cur_msg->state = xfer;
0497     espi->rx = 0;
0498     espi->tx = 0;
0499 
0500     /*
0501      * There is no point of setting up DMA for the transfers which will
0502      * fit into the FIFO and can be transferred with a single interrupt.
0503      * So in these cases we will be using PIO and don't bother for DMA.
0504      */
0505     if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE)
0506         return ep93xx_spi_dma_transfer(master);
0507 
0508     /* Using PIO so prime the TX FIFO and enable interrupts */
0509     ep93xx_spi_read_write(master);
0510 
0511     val = readl(espi->mmio + SSPCR1);
0512     val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
0513     writel(val, espi->mmio + SSPCR1);
0514 
0515     /* signal that we need to wait for completion */
0516     return 1;
0517 }
0518 
0519 static int ep93xx_spi_prepare_message(struct spi_master *master,
0520                       struct spi_message *msg)
0521 {
0522     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0523     unsigned long timeout;
0524 
0525     /*
0526      * Just to be sure: flush any data from RX FIFO.
0527      */
0528     timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
0529     while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
0530         if (time_after(jiffies, timeout)) {
0531             dev_warn(&master->dev,
0532                  "timeout while flushing RX FIFO\n");
0533             return -ETIMEDOUT;
0534         }
0535         readl(espi->mmio + SSPDR);
0536     }
0537 
0538     /*
0539      * We explicitly handle FIFO level. This way we don't have to check TX
0540      * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
0541      */
0542     espi->fifo_level = 0;
0543 
0544     return 0;
0545 }
0546 
0547 static int ep93xx_spi_prepare_hardware(struct spi_master *master)
0548 {
0549     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0550     u32 val;
0551     int ret;
0552 
0553     ret = clk_prepare_enable(espi->clk);
0554     if (ret)
0555         return ret;
0556 
0557     val = readl(espi->mmio + SSPCR1);
0558     val |= SSPCR1_SSE;
0559     writel(val, espi->mmio + SSPCR1);
0560 
0561     return 0;
0562 }
0563 
0564 static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
0565 {
0566     struct ep93xx_spi *espi = spi_master_get_devdata(master);
0567     u32 val;
0568 
0569     val = readl(espi->mmio + SSPCR1);
0570     val &= ~SSPCR1_SSE;
0571     writel(val, espi->mmio + SSPCR1);
0572 
0573     clk_disable_unprepare(espi->clk);
0574 
0575     return 0;
0576 }
0577 
0578 static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
0579 {
0580     if (ep93xx_dma_chan_is_m2p(chan))
0581         return false;
0582 
0583     chan->private = filter_param;
0584     return true;
0585 }
0586 
0587 static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
0588 {
0589     dma_cap_mask_t mask;
0590     int ret;
0591 
0592     espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
0593     if (!espi->zeropage)
0594         return -ENOMEM;
0595 
0596     dma_cap_zero(mask);
0597     dma_cap_set(DMA_SLAVE, mask);
0598 
0599     espi->dma_rx_data.port = EP93XX_DMA_SSP;
0600     espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
0601     espi->dma_rx_data.name = "ep93xx-spi-rx";
0602 
0603     espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
0604                        &espi->dma_rx_data);
0605     if (!espi->dma_rx) {
0606         ret = -ENODEV;
0607         goto fail_free_page;
0608     }
0609 
0610     espi->dma_tx_data.port = EP93XX_DMA_SSP;
0611     espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
0612     espi->dma_tx_data.name = "ep93xx-spi-tx";
0613 
0614     espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
0615                        &espi->dma_tx_data);
0616     if (!espi->dma_tx) {
0617         ret = -ENODEV;
0618         goto fail_release_rx;
0619     }
0620 
0621     return 0;
0622 
0623 fail_release_rx:
0624     dma_release_channel(espi->dma_rx);
0625     espi->dma_rx = NULL;
0626 fail_free_page:
0627     free_page((unsigned long)espi->zeropage);
0628 
0629     return ret;
0630 }
0631 
0632 static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
0633 {
0634     if (espi->dma_rx) {
0635         dma_release_channel(espi->dma_rx);
0636         sg_free_table(&espi->rx_sgt);
0637     }
0638     if (espi->dma_tx) {
0639         dma_release_channel(espi->dma_tx);
0640         sg_free_table(&espi->tx_sgt);
0641     }
0642 
0643     if (espi->zeropage)
0644         free_page((unsigned long)espi->zeropage);
0645 }
0646 
0647 static int ep93xx_spi_probe(struct platform_device *pdev)
0648 {
0649     struct spi_master *master;
0650     struct ep93xx_spi_info *info;
0651     struct ep93xx_spi *espi;
0652     struct resource *res;
0653     int irq;
0654     int error;
0655 
0656     info = dev_get_platdata(&pdev->dev);
0657     if (!info) {
0658         dev_err(&pdev->dev, "missing platform data\n");
0659         return -EINVAL;
0660     }
0661 
0662     irq = platform_get_irq(pdev, 0);
0663     if (irq < 0)
0664         return -EBUSY;
0665 
0666     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0667     if (!res) {
0668         dev_err(&pdev->dev, "unable to get iomem resource\n");
0669         return -ENODEV;
0670     }
0671 
0672     master = spi_alloc_master(&pdev->dev, sizeof(*espi));
0673     if (!master)
0674         return -ENOMEM;
0675 
0676     master->use_gpio_descriptors = true;
0677     master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
0678     master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
0679     master->prepare_message = ep93xx_spi_prepare_message;
0680     master->transfer_one = ep93xx_spi_transfer_one;
0681     master->bus_num = pdev->id;
0682     master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
0683     master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
0684     /*
0685      * The SPI core will count the number of GPIO descriptors to figure
0686      * out the number of chip selects available on the platform.
0687      */
0688     master->num_chipselect = 0;
0689 
0690     platform_set_drvdata(pdev, master);
0691 
0692     espi = spi_master_get_devdata(master);
0693 
0694     espi->clk = devm_clk_get(&pdev->dev, NULL);
0695     if (IS_ERR(espi->clk)) {
0696         dev_err(&pdev->dev, "unable to get spi clock\n");
0697         error = PTR_ERR(espi->clk);
0698         goto fail_release_master;
0699     }
0700 
0701     /*
0702      * Calculate maximum and minimum supported clock rates
0703      * for the controller.
0704      */
0705     master->max_speed_hz = clk_get_rate(espi->clk) / 2;
0706     master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
0707 
0708     espi->sspdr_phys = res->start + SSPDR;
0709 
0710     espi->mmio = devm_ioremap_resource(&pdev->dev, res);
0711     if (IS_ERR(espi->mmio)) {
0712         error = PTR_ERR(espi->mmio);
0713         goto fail_release_master;
0714     }
0715 
0716     error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
0717                 0, "ep93xx-spi", master);
0718     if (error) {
0719         dev_err(&pdev->dev, "failed to request irq\n");
0720         goto fail_release_master;
0721     }
0722 
0723     if (info->use_dma && ep93xx_spi_setup_dma(espi))
0724         dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
0725 
0726     /* make sure that the hardware is disabled */
0727     writel(0, espi->mmio + SSPCR1);
0728 
0729     error = devm_spi_register_master(&pdev->dev, master);
0730     if (error) {
0731         dev_err(&pdev->dev, "failed to register SPI master\n");
0732         goto fail_free_dma;
0733     }
0734 
0735     dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
0736          (unsigned long)res->start, irq);
0737 
0738     return 0;
0739 
0740 fail_free_dma:
0741     ep93xx_spi_release_dma(espi);
0742 fail_release_master:
0743     spi_master_put(master);
0744 
0745     return error;
0746 }
0747 
/*
 * ep93xx_spi_remove() - unbind the driver
 *
 * Only the DMA resources need explicit teardown here; the master, clock,
 * mapping and IRQ are all devm-managed.
 */
static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);

	ep93xx_spi_release_dma(spi_master_get_devdata(master));

	return 0;
}
0757 
/* Binds to the "ep93xx-spi" platform device registered by the SoC code. */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");