Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * S3C24XX DMA handling
0004  *
0005  * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
0006  *
0007  * based on amba-pl08x.c
0008  *
0009  * Copyright (c) 2006 ARM Ltd.
0010  * Copyright (c) 2010 ST-Ericsson SA
0011  *
0012  * Author: Peter Pearse <peter.pearse@arm.com>
0013  * Author: Linus Walleij <linus.walleij@stericsson.com>
0014  *
0015  * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
0016  * that can be routed to any of the 4 to 8 hardware-channels.
0017  *
0018  * Therefore on these DMA controllers the number of channels
0019  * and the number of incoming DMA signals are two totally different things.
0020  * It is usually not possible to theoretically handle all physical signals,
0021  * so a multiplexing scheme with possible denial of use is necessary.
0022  *
0023  * Open items:
0024  * - bursts
0025  */
0026 
0027 #include <linux/platform_device.h>
0028 #include <linux/types.h>
0029 #include <linux/dmaengine.h>
0030 #include <linux/dma-mapping.h>
0031 #include <linux/interrupt.h>
0032 #include <linux/clk.h>
0033 #include <linux/module.h>
0034 #include <linux/mod_devicetable.h>
0035 #include <linux/slab.h>
0036 #include <linux/platform_data/dma-s3c24xx.h>
0037 
0038 #include "dmaengine.h"
0039 #include "virt-dma.h"
0040 
0041 #define MAX_DMA_CHANNELS    8
0042 
0043 #define S3C24XX_DISRC           0x00
0044 #define S3C24XX_DISRCC          0x04
0045 #define S3C24XX_DISRCC_INC_INCREMENT    0
0046 #define S3C24XX_DISRCC_INC_FIXED    BIT(0)
0047 #define S3C24XX_DISRCC_LOC_AHB      0
0048 #define S3C24XX_DISRCC_LOC_APB      BIT(1)
0049 
0050 #define S3C24XX_DIDST           0x08
0051 #define S3C24XX_DIDSTC          0x0c
0052 #define S3C24XX_DIDSTC_INC_INCREMENT    0
0053 #define S3C24XX_DIDSTC_INC_FIXED    BIT(0)
0054 #define S3C24XX_DIDSTC_LOC_AHB      0
0055 #define S3C24XX_DIDSTC_LOC_APB      BIT(1)
0056 #define S3C24XX_DIDSTC_INT_TC0      0
0057 #define S3C24XX_DIDSTC_INT_RELOAD   BIT(2)
0058 
0059 #define S3C24XX_DCON            0x10
0060 
0061 #define S3C24XX_DCON_TC_MASK        0xfffff
0062 #define S3C24XX_DCON_DSZ_BYTE       (0 << 20)
0063 #define S3C24XX_DCON_DSZ_HALFWORD   (1 << 20)
0064 #define S3C24XX_DCON_DSZ_WORD       (2 << 20)
0065 #define S3C24XX_DCON_DSZ_MASK       (3 << 20)
0066 #define S3C24XX_DCON_DSZ_SHIFT      20
0067 #define S3C24XX_DCON_AUTORELOAD     0
0068 #define S3C24XX_DCON_NORELOAD       BIT(22)
0069 #define S3C24XX_DCON_HWTRIG     BIT(23)
0070 #define S3C24XX_DCON_HWSRC_SHIFT    24
0071 #define S3C24XX_DCON_SERV_SINGLE    0
0072 #define S3C24XX_DCON_SERV_WHOLE     BIT(27)
0073 #define S3C24XX_DCON_TSZ_UNIT       0
0074 #define S3C24XX_DCON_TSZ_BURST4     BIT(28)
0075 #define S3C24XX_DCON_INT        BIT(29)
0076 #define S3C24XX_DCON_SYNC_PCLK      0
0077 #define S3C24XX_DCON_SYNC_HCLK      BIT(30)
0078 #define S3C24XX_DCON_DEMAND     0
0079 #define S3C24XX_DCON_HANDSHAKE      BIT(31)
0080 
0081 #define S3C24XX_DSTAT           0x14
0082 #define S3C24XX_DSTAT_STAT_BUSY     BIT(20)
0083 #define S3C24XX_DSTAT_CURRTC_MASK   0xfffff
0084 
0085 #define S3C24XX_DMASKTRIG       0x20
0086 #define S3C24XX_DMASKTRIG_SWTRIG    BIT(0)
0087 #define S3C24XX_DMASKTRIG_ON        BIT(1)
0088 #define S3C24XX_DMASKTRIG_STOP      BIT(2)
0089 
0090 #define S3C24XX_DMAREQSEL       0x24
0091 #define S3C24XX_DMAREQSEL_HW        BIT(0)
0092 
0093 /*
0094  * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
0095  * for a DMA source. Instead only specific channels are valid.
0096  * All of these SoCs have 4 physical channels and the number of request
0097  * source bits is 3. Additionally we also need 1 bit to mark the channel
0098  * as valid.
0099  * Therefore we separate the chansel element of the channel data into 4
0100  * parts of 4 bits each, to hold the information if the channel is valid
0101  * and the hw request source to use.
0102  *
0103  * Example:
0104  * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
0105  * For it the chansel field would look like
0106  *
0107  * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
0108  * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
0109  * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
0110  */
0111 #define S3C24XX_CHANSEL_WIDTH       4
0112 #define S3C24XX_CHANSEL_VALID       BIT(3)
0113 #define S3C24XX_CHANSEL_REQ_MASK    7
0114 
0115 /*
0116  * struct soc_data - vendor-specific config parameters for individual SoCs
0117  * @stride: spacing between the registers of each channel
0118  * @has_reqsel: does the controller use the newer requestselection mechanism
0119  * @has_clocks: are controllable dma-clocks present
0120  */
0121 struct soc_data {
0122     int stride;
0123     bool has_reqsel;
0124     bool has_clocks;
0125 };
0126 
0127 /*
0128  * enum s3c24xx_dma_chan_state - holds the virtual channel states
0129  * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
0130  * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
0131  * channel and is running a transfer on it
0132  * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
0133  * channel to become available (only pertains to memcpy channels)
0134  */
0135 enum s3c24xx_dma_chan_state {
0136     S3C24XX_DMA_CHAN_IDLE,
0137     S3C24XX_DMA_CHAN_RUNNING,
0138     S3C24XX_DMA_CHAN_WAITING,
0139 };
0140 
0141 /*
0142  * struct s3c24xx_sg - structure containing data per sg
0143  * @src_addr: src address of sg
0144  * @dst_addr: dst address of sg
0145  * @len: transfer len in bytes
0146  * @node: node for txd's dsg_list
0147  */
0148 struct s3c24xx_sg {
0149     dma_addr_t src_addr;
0150     dma_addr_t dst_addr;
0151     size_t len;
0152     struct list_head node;
0153 };
0154 
0155 /*
0156  * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
0157  * @vd: virtual DMA descriptor
0158  * @dsg_list: list of children sg's
0159  * @at: sg currently being transfered
0160  * @width: transfer width
0161  * @disrcc: value for source control register
0162  * @didstc: value for destination control register
0163  * @dcon: base value for dcon register
0164  * @cyclic: indicate cyclic transfer
0165  */
0166 struct s3c24xx_txd {
0167     struct virt_dma_desc vd;
0168     struct list_head dsg_list;
0169     struct list_head *at;
0170     u8 width;
0171     u32 disrcc;
0172     u32 didstc;
0173     u32 dcon;
0174     bool cyclic;
0175 };
0176 
0177 struct s3c24xx_dma_chan;
0178 
0179 /*
0180  * struct s3c24xx_dma_phy - holder for the physical channels
0181  * @id: physical index to this channel
0182  * @valid: does the channel have all required elements
0183  * @base: virtual memory base (remapped) for the this channel
0184  * @irq: interrupt for this channel
0185  * @clk: clock for this channel
0186  * @lock: a lock to use when altering an instance of this struct
0187  * @serving: virtual channel currently being served by this physicalchannel
0188  * @host: a pointer to the host (internal use)
0189  */
0190 struct s3c24xx_dma_phy {
0191     unsigned int            id;
0192     bool                valid;
0193     void __iomem            *base;
0194     int             irq;
0195     struct clk          *clk;
0196     spinlock_t          lock;
0197     struct s3c24xx_dma_chan     *serving;
0198     struct s3c24xx_dma_engine   *host;
0199 };
0200 
0201 /*
0202  * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
0203  * @id: the id of the channel
0204  * @name: name of the channel
0205  * @vc: wrapped virtual channel
0206  * @phy: the physical channel utilized by this channel, if there is one
0207  * @runtime_addr: address for RX/TX according to the runtime config
0208  * @at: active transaction on this channel
0209  * @lock: a lock for this channel data
0210  * @host: a pointer to the host (internal use)
0211  * @state: whether the channel is idle, running etc
0212  * @slave: whether this channel is a device (slave) or for memcpy
0213  */
0214 struct s3c24xx_dma_chan {
0215     int id;
0216     const char *name;
0217     struct virt_dma_chan vc;
0218     struct s3c24xx_dma_phy *phy;
0219     struct dma_slave_config cfg;
0220     struct s3c24xx_txd *at;
0221     struct s3c24xx_dma_engine *host;
0222     enum s3c24xx_dma_chan_state state;
0223     bool slave;
0224 };
0225 
0226 /*
0227  * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
0228  * @pdev: the corresponding platform device
0229  * @pdata: platform data passed in from the platform/machine
0230  * @base: virtual memory base (remapped)
0231  * @slave: slave engine for this instance
0232  * @memcpy: memcpy engine for this instance
0233  * @phy_chans: array of data for the physical channels
0234  */
0235 struct s3c24xx_dma_engine {
0236     struct platform_device          *pdev;
0237     const struct s3c24xx_dma_platdata   *pdata;
0238     struct soc_data             *sdata;
0239     void __iomem                *base;
0240     struct dma_device           slave;
0241     struct dma_device           memcpy;
0242     struct s3c24xx_dma_phy          *phy_chans;
0243 };
0244 
0245 /*
0246  * Physical channel handling
0247  */
0248 
0249 /*
0250  * Check whether a certain channel is busy or not.
0251  */
0252 static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
0253 {
0254     unsigned int val = readl(phy->base + S3C24XX_DSTAT);
0255     return val & S3C24XX_DSTAT_STAT_BUSY;
0256 }
0257 
0258 static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
0259                   struct s3c24xx_dma_phy *phy)
0260 {
0261     struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
0262     const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
0263     struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
0264     int phyvalid;
0265 
0266     /* every phy is valid for memcopy channels */
0267     if (!s3cchan->slave)
0268         return true;
0269 
0270     /* On newer variants all phys can be used for all virtual channels */
0271     if (s3cdma->sdata->has_reqsel)
0272         return true;
0273 
0274     phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
0275     return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
0276 }
0277 
0278 /*
0279  * Allocate a physical channel for a virtual channel
0280  *
0281  * Try to locate a physical channel to be used for this transfer. If all
0282  * are taken return NULL and the requester will have to cope by using
0283  * some fallback PIO mode or retrying later.
0284  */
0285 static
0286 struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
0287 {
0288     struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
0289     struct s3c24xx_dma_phy *phy = NULL;
0290     unsigned long flags;
0291     int i;
0292     int ret;
0293 
0294     for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
0295         phy = &s3cdma->phy_chans[i];
0296 
0297         if (!phy->valid)
0298             continue;
0299 
0300         if (!s3c24xx_dma_phy_valid(s3cchan, phy))
0301             continue;
0302 
0303         spin_lock_irqsave(&phy->lock, flags);
0304 
0305         if (!phy->serving) {
0306             phy->serving = s3cchan;
0307             spin_unlock_irqrestore(&phy->lock, flags);
0308             break;
0309         }
0310 
0311         spin_unlock_irqrestore(&phy->lock, flags);
0312     }
0313 
0314     /* No physical channel available, cope with it */
0315     if (i == s3cdma->pdata->num_phy_channels) {
0316         dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
0317         return NULL;
0318     }
0319 
0320     /* start the phy clock */
0321     if (s3cdma->sdata->has_clocks) {
0322         ret = clk_enable(phy->clk);
0323         if (ret) {
0324             dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
0325                 phy->id, ret);
0326             phy->serving = NULL;
0327             return NULL;
0328         }
0329     }
0330 
0331     return phy;
0332 }
0333 
0334 /*
0335  * Mark the physical channel as free.
0336  *
0337  * This drops the link between the physical and virtual channel.
0338  */
0339 static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
0340 {
0341     struct s3c24xx_dma_engine *s3cdma = phy->host;
0342 
0343     if (s3cdma->sdata->has_clocks)
0344         clk_disable(phy->clk);
0345 
0346     phy->serving = NULL;
0347 }
0348 
0349 /*
0350  * Stops the channel by writing the stop bit.
0351  * This should not be used for an on-going transfer, but as a method of
0352  * shutting down a channel (eg, when it's no longer used) or terminating a
0353  * transfer.
0354  */
0355 static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
0356 {
0357     writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
0358 }
0359 
0360 /*
0361  * Virtual channel handling
0362  */
0363 
/* Convert a generic dma_chan into its wrapping s3c24xx channel. */
static inline
struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
{
    return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
}
0369 
0370 static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
0371 {
0372     struct s3c24xx_dma_phy *phy = s3cchan->phy;
0373     struct s3c24xx_txd *txd = s3cchan->at;
0374     u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
0375 
0376     return tc * txd->width;
0377 }
0378 
0379 static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
0380                   struct dma_slave_config *config)
0381 {
0382     struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
0383     unsigned long flags;
0384     int ret = 0;
0385 
0386     /* Reject definitely invalid configurations */
0387     if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
0388         config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
0389         return -EINVAL;
0390 
0391     spin_lock_irqsave(&s3cchan->vc.lock, flags);
0392 
0393     if (!s3cchan->slave) {
0394         ret = -EINVAL;
0395         goto out;
0396     }
0397 
0398     s3cchan->cfg = *config;
0399 
0400 out:
0401     spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
0402     return ret;
0403 }
0404 
0405 /*
0406  * Transfer handling
0407  */
0408 
/* Convert a generic tx descriptor into its wrapping s3c24xx txd. */
static inline
struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
{
    return container_of(tx, struct s3c24xx_txd, vd.tx);
}
0414 
0415 static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
0416 {
0417     struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
0418 
0419     if (txd) {
0420         INIT_LIST_HEAD(&txd->dsg_list);
0421         txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
0422     }
0423 
0424     return txd;
0425 }
0426 
0427 static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
0428 {
0429     struct s3c24xx_sg *dsg, *_dsg;
0430 
0431     list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
0432         list_del(&dsg->node);
0433         kfree(dsg);
0434     }
0435 
0436     kfree(txd);
0437 }
0438 
/*
 * Program the phy registers for the sg that txd->at points to and
 * kick off the transfer (software-triggered for memcpy channels).
 */
static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
                       struct s3c24xx_txd *txd)
{
    struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
    struct s3c24xx_dma_phy *phy = s3cchan->phy;
    const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
    struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
    u32 dcon = txd->dcon;
    u32 val;

    /* transfer-size and -count from len and width */
    switch (txd->width) {
    case 1:
        dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
        break;
    case 2:
        dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
        break;
    case 4:
        dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
        break;
    }

    if (s3cchan->slave) {
        struct s3c24xx_dma_channel *cdata =
                    &pdata->channels[s3cchan->id];

        if (s3cdma->sdata->has_reqsel) {
            /* newer SoCs: route the hw signal via DMAREQSEL */
            writel_relaxed((cdata->chansel << 1) |
                            S3C24XX_DMAREQSEL_HW,
                    phy->base + S3C24XX_DMAREQSEL);
        } else {
            /* older SoCs: hw source is encoded in DCON itself */
            int csel = cdata->chansel >> (phy->id *
                            S3C24XX_CHANSEL_WIDTH);

            csel &= S3C24XX_CHANSEL_REQ_MASK;
            dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
            dcon |= S3C24XX_DCON_HWTRIG;
        }
    } else {
        /* memcpy: make sure no stale hw request routing remains */
        if (s3cdma->sdata->has_reqsel)
            writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
    }

    /* program addresses and control values for this sg */
    writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
    writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
    writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
    writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
    writel_relaxed(dcon, phy->base + S3C24XX_DCON);

    /* enable the channel, clearing any pending stop request */
    val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
    val &= ~S3C24XX_DMASKTRIG_STOP;
    val |= S3C24XX_DMASKTRIG_ON;

    /* trigger the dma operation for memcpy transfers */
    if (!s3cchan->slave)
        val |= S3C24XX_DMASKTRIG_SWTRIG;

    writel(val, phy->base + S3C24XX_DMASKTRIG);
}
0499 
0500 /*
0501  * Set the initial DMA register values and start first sg.
0502  */
0503 static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
0504 {
0505     struct s3c24xx_dma_phy *phy = s3cchan->phy;
0506     struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
0507     struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
0508 
0509     list_del(&txd->vd.node);
0510 
0511     s3cchan->at = txd;
0512 
0513     /* Wait for channel inactive */
0514     while (s3c24xx_dma_phy_busy(phy))
0515         cpu_relax();
0516 
0517     /* point to the first element of the sg list */
0518     txd->at = txd->dsg_list.next;
0519     s3c24xx_dma_start_next_sg(s3cchan, txd);
0520 }
0521 
0522 /*
0523  * Try to allocate a physical channel.  When successful, assign it to
0524  * this virtual channel, and initiate the next descriptor.  The
0525  * virtual channel lock must be held at this point.
0526  */
0527 static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
0528 {
0529     struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
0530     struct s3c24xx_dma_phy *phy;
0531 
0532     phy = s3c24xx_dma_get_phy(s3cchan);
0533     if (!phy) {
0534         dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
0535             s3cchan->name);
0536         s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
0537         return;
0538     }
0539 
0540     dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
0541         phy->id, s3cchan->name);
0542 
0543     s3cchan->phy = phy;
0544     s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
0545 
0546     s3c24xx_dma_start_next_txd(s3cchan);
0547 }
0548 
/*
 * Hand an already-claimed phy over to another waiting virtual channel
 * and start its next descriptor.  Called with the new channel's vc
 * lock held (see s3c24xx_dma_phy_free).
 */
static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
    struct s3c24xx_dma_chan *s3cchan)
{
    struct s3c24xx_dma_engine *s3cdma = s3cchan->host;

    dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
        phy->id, s3cchan->name);

    /*
     * We do this without taking the lock; we're really only concerned
     * about whether this pointer is NULL or not, and we're guaranteed
     * that this will only be called when it _already_ is non-NULL.
     */
    phy->serving = s3cchan;
    s3cchan->phy = phy;
    s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
    s3c24xx_dma_start_next_txd(s3cchan);
}
0567 
0568 /*
0569  * Free a physical DMA channel, potentially reallocating it to another
0570  * virtual channel if we have any pending.
0571  */
0572 static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
0573 {
0574     struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
0575     struct s3c24xx_dma_chan *p, *next;
0576 
0577 retry:
0578     next = NULL;
0579 
0580     /* Find a waiting virtual channel for the next transfer. */
0581     list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
0582         if (p->state == S3C24XX_DMA_CHAN_WAITING) {
0583             next = p;
0584             break;
0585         }
0586 
0587     if (!next) {
0588         list_for_each_entry(p, &s3cdma->slave.channels,
0589                     vc.chan.device_node)
0590             if (p->state == S3C24XX_DMA_CHAN_WAITING &&
0591                       s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
0592                 next = p;
0593                 break;
0594             }
0595     }
0596 
0597     /* Ensure that the physical channel is stopped */
0598     s3c24xx_dma_terminate_phy(s3cchan->phy);
0599 
0600     if (next) {
0601         bool success;
0602 
0603         /*
0604          * Eww.  We know this isn't going to deadlock
0605          * but lockdep probably doesn't.
0606          */
0607         spin_lock(&next->vc.lock);
0608         /* Re-check the state now that we have the lock */
0609         success = next->state == S3C24XX_DMA_CHAN_WAITING;
0610         if (success)
0611             s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
0612         spin_unlock(&next->vc.lock);
0613 
0614         /* If the state changed, try to find another channel */
0615         if (!success)
0616             goto retry;
0617     } else {
0618         /* No more jobs, so free up the physical channel */
0619         s3c24xx_dma_put_phy(s3cchan->phy);
0620     }
0621 
0622     s3cchan->phy = NULL;
0623     s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
0624 }
0625 
0626 static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
0627 {
0628     struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
0629     struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
0630 
0631     if (!s3cchan->slave)
0632         dma_descriptor_unmap(&vd->tx);
0633 
0634     s3c24xx_dma_free_txd(txd);
0635 }
0636 
/*
 * Per-phy interrupt handler: advances to the next sg of the active
 * descriptor, completes the descriptor, or (for cyclic transfers)
 * wraps back to the first sg.
 */
static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
{
    struct s3c24xx_dma_phy *phy = data;
    struct s3c24xx_dma_chan *s3cchan = phy->serving;
    struct s3c24xx_txd *txd;

    dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);

    /*
     * Interrupts happen to notify the completion of a transfer and the
     * channel should have moved into its stop state already on its own.
     * Therefore interrupts on channels not bound to a virtual channel
     * should never happen. Nevertheless send a terminate command to the
     * channel if the unlikely case happens.
     */
    if (unlikely(!s3cchan)) {
        dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
            phy->id);

        s3c24xx_dma_terminate_phy(phy);

        return IRQ_HANDLED;
    }

    spin_lock(&s3cchan->vc.lock);
    txd = s3cchan->at;
    if (txd) {
        /* when more sg's are in this txd, start the next one */
        if (!list_is_last(txd->at, &txd->dsg_list)) {
            txd->at = txd->at->next;
            /* cyclic transfers report each completed period */
            if (txd->cyclic)
                vchan_cyclic_callback(&txd->vd);
            s3c24xx_dma_start_next_sg(s3cchan, txd);
        } else if (!txd->cyclic) {
            /* last sg of a one-shot transfer: complete it */
            s3cchan->at = NULL;
            vchan_cookie_complete(&txd->vd);

            /*
             * And start the next descriptor (if any),
             * otherwise free this channel.
             */
            if (vchan_next_desc(&s3cchan->vc))
                s3c24xx_dma_start_next_txd(s3cchan);
            else
                s3c24xx_dma_phy_free(s3cchan);
        } else {
            vchan_cyclic_callback(&txd->vd);

            /* Cyclic: reset at beginning */
            txd->at = txd->dsg_list.next;
            s3c24xx_dma_start_next_sg(s3cchan, txd);
        }
    }
    spin_unlock(&s3cchan->vc.lock);

    return IRQ_HANDLED;
}
0694 
0695 /*
0696  * The DMA ENGINE API
0697  */
0698 
/*
 * Abort all activity on this channel: release the phy, drop the active
 * descriptor and free everything still queued.  Descriptors are freed
 * outside the lock since their free callback may sleep-free memory.
 */
static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{
    struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
    struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
    LIST_HEAD(head);
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&s3cchan->vc.lock, flags);

    /* nothing to do if the channel holds no phy and no active txd */
    if (!s3cchan->phy && !s3cchan->at) {
        dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
            s3cchan->id);
        ret = -EINVAL;
        goto unlock;
    }

    s3cchan->state = S3C24XX_DMA_CHAN_IDLE;

    /* Mark physical channel as free */
    if (s3cchan->phy)
        s3c24xx_dma_phy_free(s3cchan);

    /* Dequeue current job */
    if (s3cchan->at) {
        vchan_terminate_vdesc(&s3cchan->at->vd);
        s3cchan->at = NULL;
    }

    /* Dequeue jobs not yet fired as well */

    vchan_get_all_descriptors(&s3cchan->vc, &head);

    spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

    vchan_dma_desc_free_list(&s3cchan->vc, &head);

    return 0;

unlock:
    spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

    return ret;
}
0743 
/* Wait until any terminated descriptors' callbacks have finished. */
static void s3c24xx_dma_synchronize(struct dma_chan *chan)
{
    struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);

    vchan_synchronize(&s3cchan->vc);
}
0750 
/* Release per-channel resources when a client gives the channel back. */
static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
    /* Ensure all queued descriptors are freed */
    vchan_free_chan_resources(to_virt_chan(chan));
}
0756 
/*
 * Report the status of a cookie and, for in-flight transfers, the
 * residue: bytes remaining in queued sg's plus the hardware count of
 * the sg currently running.
 */
static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
    struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
    struct s3c24xx_txd *txd;
    struct s3c24xx_sg *dsg;
    struct virt_dma_desc *vd;
    unsigned long flags;
    enum dma_status ret;
    size_t bytes = 0;

    spin_lock_irqsave(&s3cchan->vc.lock, flags);
    ret = dma_cookie_status(chan, cookie, txstate);

    /*
     * There's no point calculating the residue if there's
     * no txstate to store the value.
     */
    if (ret == DMA_COMPLETE || !txstate) {
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
        return ret;
    }

    vd = vchan_find_desc(&s3cchan->vc, cookie);
    if (vd) {
        /* On the issued list, so hasn't been processed yet */
        txd = to_s3c24xx_txd(&vd->tx);

        list_for_each_entry(dsg, &txd->dsg_list, node)
            bytes += dsg->len;
    } else {
        /*
         * Currently running, so sum over the pending sg's and
         * the currently active one.
         *
         * NOTE(review): this assumes the cookie belongs to the
         * descriptor in s3cchan->at; if the channel was stopped
         * between the cookie check and here, txd could be NULL -
         * confirm the vc lock held across both prevents that.
         */
        txd = s3cchan->at;

        dsg = list_entry(txd->at, struct s3c24xx_sg, node);
        list_for_each_entry_from(dsg, &txd->dsg_list, node)
            bytes += dsg->len;

        bytes += s3c24xx_dma_getbytes_chan(s3cchan);
    }
    spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

    /*
     * This cookie not complete yet
     * Get number of bytes left in the active transactions and queue
     */
    dma_set_residue(txstate, bytes);

    /* Whether waiting or running, we're in progress */
    return ret;
}
0811 
0812 /*
0813  * Initialize a descriptor to be used by memcpy submit
0814  */
0815 static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
0816         struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
0817         size_t len, unsigned long flags)
0818 {
0819     struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
0820     struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
0821     struct s3c24xx_txd *txd;
0822     struct s3c24xx_sg *dsg;
0823     int src_mod, dest_mod;
0824 
0825     dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
0826             len, s3cchan->name);
0827 
0828     if ((len & S3C24XX_DCON_TC_MASK) != len) {
0829         dev_err(&s3cdma->pdev->dev, "memcpy size %zu to large\n", len);
0830         return NULL;
0831     }
0832 
0833     txd = s3c24xx_dma_get_txd();
0834     if (!txd)
0835         return NULL;
0836 
0837     dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
0838     if (!dsg) {
0839         s3c24xx_dma_free_txd(txd);
0840         return NULL;
0841     }
0842     list_add_tail(&dsg->node, &txd->dsg_list);
0843 
0844     dsg->src_addr = src;
0845     dsg->dst_addr = dest;
0846     dsg->len = len;
0847 
0848     /*
0849      * Determine a suitable transfer width.
0850      * The DMA controller cannot fetch/store information which is not
0851      * naturally aligned on the bus, i.e., a 4 byte fetch must start at
0852      * an address divisible by 4 - more generally addr % width must be 0.
0853      */
0854     src_mod = src % 4;
0855     dest_mod = dest % 4;
0856     switch (len % 4) {
0857     case 0:
0858         txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
0859         break;
0860     case 2:
0861         txd->width = ((src_mod == 2 || src_mod == 0) &&
0862                   (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
0863         break;
0864     default:
0865         txd->width = 1;
0866         break;
0867     }
0868 
0869     txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
0870     txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
0871     txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
0872              S3C24XX_DCON_SERV_WHOLE;
0873 
0874     return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
0875 }
0876 
/*
 * Prepare a cyclic (audio-style) transfer: the buffer of @size bytes is
 * split into @period-sized sg's that the irq handler replays forever.
 *
 * NOTE(review): sg_len = size / period divides without validating that
 * period is non-zero and <= size - confirm callers (dmaengine core)
 * guarantee this before relying on it.
 */
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
    struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
    enum dma_transfer_direction direction, unsigned long flags)
{
    struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
    struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
    const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
    struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
    struct s3c24xx_txd *txd;
    struct s3c24xx_sg *dsg;
    unsigned sg_len;
    dma_addr_t slave_addr;
    u32 hwcfg = 0;
    int i;

    dev_dbg(&s3cdma->pdev->dev,
        "prepare cyclic transaction of %zu bytes with period %zu from %s\n",
        size, period, s3cchan->name);

    if (!is_slave_direction(direction)) {
        dev_err(&s3cdma->pdev->dev,
            "direction %d unsupported\n", direction);
        return NULL;
    }

    txd = s3c24xx_dma_get_txd();
    if (!txd)
        return NULL;

    txd->cyclic = 1;

    if (cdata->handshake)
        txd->dcon |= S3C24XX_DCON_HANDSHAKE;

    /* pick the clock sync and bus location matching the peripheral */
    switch (cdata->bus) {
    case S3C24XX_DMA_APB:
        txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
        hwcfg |= S3C24XX_DISRCC_LOC_APB;
        break;
    case S3C24XX_DMA_AHB:
        txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
        hwcfg |= S3C24XX_DISRCC_LOC_AHB;
        break;
    }

    /*
     * Always assume our peripheral destination is a fixed
     * address in memory.
     */
    hwcfg |= S3C24XX_DISRCC_INC_FIXED;

    /*
     * Individual dma operations are requested by the slave,
     * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
     */
    txd->dcon |= S3C24XX_DCON_SERV_SINGLE;

    if (direction == DMA_MEM_TO_DEV) {
        txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
                  S3C24XX_DISRCC_INC_INCREMENT;
        txd->didstc = hwcfg;
        slave_addr = s3cchan->cfg.dst_addr;
        txd->width = s3cchan->cfg.dst_addr_width;
    } else {
        txd->disrcc = hwcfg;
        txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
                  S3C24XX_DIDSTC_INC_INCREMENT;
        slave_addr = s3cchan->cfg.src_addr;
        txd->width = s3cchan->cfg.src_addr_width;
    }

    sg_len = size / period;

    for (i = 0; i < sg_len; i++) {
        dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
        if (!dsg) {
            s3c24xx_dma_free_txd(txd);
            return NULL;
        }
        list_add_tail(&dsg->node, &txd->dsg_list);

        dsg->len = period;
        /* Check last period length */
        if (i == sg_len - 1)
            dsg->len = size - period * i;
        if (direction == DMA_MEM_TO_DEV) {
            dsg->src_addr = addr + period * i;
            dsg->dst_addr = slave_addr;
        } else { /* DMA_DEV_TO_MEM */
            dsg->src_addr = slave_addr;
            dsg->dst_addr = addr + period * i;
        }
    }

    return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}
0973 
/*
 * Prepare a one-shot slave scatter/gather transaction between memory
 * (described by @sgl/@sg_len) and the peripheral configured on this
 * channel.  Returns the prepared descriptor or NULL on error.
 */
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
    struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
    struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
    const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
    struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
    struct s3c24xx_txd *txd;
    struct s3c24xx_sg *dsg;
    struct scatterlist *sg;
    dma_addr_t slave_addr;
    u32 hwcfg = 0;
    int tmp;

    /* NOTE(review): this logs only the FIRST sg entry's length, not the
     * total of the whole scatterlist. */
    dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
            sg_dma_len(sgl), s3cchan->name);

    txd = s3c24xx_dma_get_txd();
    if (!txd)
        return NULL;

    if (cdata->handshake)
        txd->dcon |= S3C24XX_DCON_HANDSHAKE;

    /* Select bus location and matching clock sync for the peripheral side. */
    switch (cdata->bus) {
    case S3C24XX_DMA_APB:
        txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
        hwcfg |= S3C24XX_DISRCC_LOC_APB;
        break;
    case S3C24XX_DMA_AHB:
        txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
        hwcfg |= S3C24XX_DISRCC_LOC_AHB;
        break;
    }

    /*
     * Always assume our peripheral destination is a fixed
     * address in memory.
     */
    hwcfg |= S3C24XX_DISRCC_INC_FIXED;

    /*
     * Individual dma operations are requested by the slave,
     * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
     */
    txd->dcon |= S3C24XX_DCON_SERV_SINGLE;

    /* Memory side always increments over AHB; peripheral side gets hwcfg. */
    if (direction == DMA_MEM_TO_DEV) {
        txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
                  S3C24XX_DISRCC_INC_INCREMENT;
        txd->didstc = hwcfg;
        slave_addr = s3cchan->cfg.dst_addr;
        txd->width = s3cchan->cfg.dst_addr_width;
    } else if (direction == DMA_DEV_TO_MEM) {
        txd->disrcc = hwcfg;
        txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
                  S3C24XX_DIDSTC_INC_INCREMENT;
        slave_addr = s3cchan->cfg.src_addr;
        txd->width = s3cchan->cfg.src_addr_width;
    } else {
        s3c24xx_dma_free_txd(txd);
        dev_err(&s3cdma->pdev->dev,
            "direction %d unsupported\n", direction);
        return NULL;
    }

    /* One internal segment per scatterlist entry. */
    for_each_sg(sgl, sg, sg_len, tmp) {
        dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
        if (!dsg) {
            /* Frees the txd and any segments already queued on it. */
            s3c24xx_dma_free_txd(txd);
            return NULL;
        }
        list_add_tail(&dsg->node, &txd->dsg_list);

        dsg->len = sg_dma_len(sg);
        if (direction == DMA_MEM_TO_DEV) {
            dsg->src_addr = sg_dma_address(sg);
            dsg->dst_addr = slave_addr;
        } else { /* DMA_DEV_TO_MEM */
            dsg->src_addr = slave_addr;
            dsg->dst_addr = sg_dma_address(sg);
        }
    }

    return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}
1062 
1063 /*
1064  * Slave transactions callback to the slave device to allow
1065  * synchronization of slave DMA signals with the DMAC enable
1066  */
1067 static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
1068 {
1069     struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
1070     unsigned long flags;
1071 
1072     spin_lock_irqsave(&s3cchan->vc.lock, flags);
1073     if (vchan_issue_pending(&s3cchan->vc)) {
1074         if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
1075             s3c24xx_dma_phy_alloc_and_start(s3cchan);
1076     }
1077     spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
1078 }
1079 
1080 /*
1081  * Bringup and teardown
1082  */
1083 
1084 /*
1085  * Initialise the DMAC memcpy/slave channels.
1086  * Make a local wrapper to hold required data
1087  */
1088 static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
1089         struct dma_device *dmadev, unsigned int channels, bool slave)
1090 {
1091     struct s3c24xx_dma_chan *chan;
1092     int i;
1093 
1094     INIT_LIST_HEAD(&dmadev->channels);
1095 
1096     /*
1097      * Register as many many memcpy as we have physical channels,
1098      * we won't always be able to use all but the code will have
1099      * to cope with that situation.
1100      */
1101     for (i = 0; i < channels; i++) {
1102         chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
1103         if (!chan)
1104             return -ENOMEM;
1105 
1106         chan->id = i;
1107         chan->host = s3cdma;
1108         chan->state = S3C24XX_DMA_CHAN_IDLE;
1109 
1110         if (slave) {
1111             chan->slave = true;
1112             chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
1113             if (!chan->name)
1114                 return -ENOMEM;
1115         } else {
1116             chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1117             if (!chan->name)
1118                 return -ENOMEM;
1119         }
1120         dev_dbg(dmadev->dev,
1121              "initialize virtual channel \"%s\"\n",
1122              chan->name);
1123 
1124         chan->vc.desc_free = s3c24xx_dma_desc_free;
1125         vchan_init(&chan->vc, dmadev);
1126     }
1127     dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
1128          i, slave ? "slave" : "memcpy");
1129     return i;
1130 }
1131 
1132 static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
1133 {
1134     struct s3c24xx_dma_chan *chan = NULL;
1135     struct s3c24xx_dma_chan *next;
1136 
1137     list_for_each_entry_safe(chan,
1138                  next, &dmadev->channels, vc.chan.device_node) {
1139         list_del(&chan->vc.chan.device_node);
1140         tasklet_kill(&chan->vc.task);
1141     }
1142 }
1143 
/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
static struct soc_data soc_s3c2410 = {
    .stride = 0x40,         /* register-block size per physical channel */
    .has_reqsel = false,    /* no dmareqsel signal-routing mechanism */
    .has_clocks = false,    /* no per-channel "dma.%d" clocks to manage */
};

/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2412 = {
    .stride = 0x40,
    .has_reqsel = true,
    .has_clocks = true,     /* probe acquires and prepares "dma.%d" clocks */
};

/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2443 = {
    .stride = 0x100,
    .has_reqsel = true,
    .has_clocks = true,
};
1164 
/*
 * Per-SoC platform-device ids; driver_data carries the matching
 * struct soc_data (register stride, reqsel and clock features).
 */
static const struct platform_device_id s3c24xx_dma_driver_ids[] = {
    {
        .name       = "s3c2410-dma",
        .driver_data    = (kernel_ulong_t)&soc_s3c2410,
    }, {
        .name       = "s3c2412-dma",
        .driver_data    = (kernel_ulong_t)&soc_s3c2412,
    }, {
        .name       = "s3c2443-dma",
        .driver_data    = (kernel_ulong_t)&soc_s3c2443,
    },
    { },
};
1178 
1179 static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
1180 {
1181     return (struct soc_data *)
1182              platform_get_device_id(pdev)->driver_data;
1183 }
1184 
1185 static int s3c24xx_dma_probe(struct platform_device *pdev)
1186 {
1187     const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
1188     struct s3c24xx_dma_engine *s3cdma;
1189     struct soc_data *sdata;
1190     struct resource *res;
1191     int ret;
1192     int i;
1193 
1194     if (!pdata) {
1195         dev_err(&pdev->dev, "platform data missing\n");
1196         return -ENODEV;
1197     }
1198 
1199     /* Basic sanity check */
1200     if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
1201         dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
1202             pdata->num_phy_channels, MAX_DMA_CHANNELS);
1203         return -EINVAL;
1204     }
1205 
1206     sdata = s3c24xx_dma_get_soc_data(pdev);
1207     if (!sdata)
1208         return -EINVAL;
1209 
1210     s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
1211     if (!s3cdma)
1212         return -ENOMEM;
1213 
1214     s3cdma->pdev = pdev;
1215     s3cdma->pdata = pdata;
1216     s3cdma->sdata = sdata;
1217 
1218     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1219     s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
1220     if (IS_ERR(s3cdma->base))
1221         return PTR_ERR(s3cdma->base);
1222 
1223     s3cdma->phy_chans = devm_kcalloc(&pdev->dev,
1224                           pdata->num_phy_channels,
1225                           sizeof(struct s3c24xx_dma_phy),
1226                           GFP_KERNEL);
1227     if (!s3cdma->phy_chans)
1228         return -ENOMEM;
1229 
1230     /* acquire irqs and clocks for all physical channels */
1231     for (i = 0; i < pdata->num_phy_channels; i++) {
1232         struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
1233         char clk_name[6];
1234 
1235         phy->id = i;
1236         phy->base = s3cdma->base + (i * sdata->stride);
1237         phy->host = s3cdma;
1238 
1239         phy->irq = platform_get_irq(pdev, i);
1240         if (phy->irq < 0)
1241             continue;
1242 
1243         ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
1244                        0, pdev->name, phy);
1245         if (ret) {
1246             dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
1247                 i, ret);
1248             continue;
1249         }
1250 
1251         if (sdata->has_clocks) {
1252             sprintf(clk_name, "dma.%d", i);
1253             phy->clk = devm_clk_get(&pdev->dev, clk_name);
1254             if (IS_ERR(phy->clk) && sdata->has_clocks) {
1255                 dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
1256                     i, PTR_ERR(phy->clk));
1257                 continue;
1258             }
1259 
1260             ret = clk_prepare(phy->clk);
1261             if (ret) {
1262                 dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
1263                     i, ret);
1264                 continue;
1265             }
1266         }
1267 
1268         spin_lock_init(&phy->lock);
1269         phy->valid = true;
1270 
1271         dev_dbg(&pdev->dev, "physical channel %d is %s\n",
1272             i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
1273     }
1274 
1275     /* Initialize memcpy engine */
1276     dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
1277     dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
1278     s3cdma->memcpy.dev = &pdev->dev;
1279     s3cdma->memcpy.device_free_chan_resources =
1280                     s3c24xx_dma_free_chan_resources;
1281     s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
1282     s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
1283     s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
1284     s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
1285     s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
1286     s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
1287 
1288     /* Initialize slave engine for SoC internal dedicated peripherals */
1289     dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
1290     dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
1291     dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
1292     s3cdma->slave.dev = &pdev->dev;
1293     s3cdma->slave.device_free_chan_resources =
1294                     s3c24xx_dma_free_chan_resources;
1295     s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
1296     s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
1297     s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
1298     s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
1299     s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
1300     s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
1301     s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
1302     s3cdma->slave.filter.map = pdata->slave_map;
1303     s3cdma->slave.filter.mapcnt = pdata->slavecnt;
1304     s3cdma->slave.filter.fn = s3c24xx_dma_filter;
1305 
1306     /* Register as many memcpy channels as there are physical channels */
1307     ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
1308                         pdata->num_phy_channels, false);
1309     if (ret <= 0) {
1310         dev_warn(&pdev->dev,
1311              "%s failed to enumerate memcpy channels - %d\n",
1312              __func__, ret);
1313         goto err_memcpy;
1314     }
1315 
1316     /* Register slave channels */
1317     ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
1318                 pdata->num_channels, true);
1319     if (ret <= 0) {
1320         dev_warn(&pdev->dev,
1321             "%s failed to enumerate slave channels - %d\n",
1322                 __func__, ret);
1323         goto err_slave;
1324     }
1325 
1326     ret = dma_async_device_register(&s3cdma->memcpy);
1327     if (ret) {
1328         dev_warn(&pdev->dev,
1329             "%s failed to register memcpy as an async device - %d\n",
1330             __func__, ret);
1331         goto err_memcpy_reg;
1332     }
1333 
1334     ret = dma_async_device_register(&s3cdma->slave);
1335     if (ret) {
1336         dev_warn(&pdev->dev,
1337             "%s failed to register slave as an async device - %d\n",
1338             __func__, ret);
1339         goto err_slave_reg;
1340     }
1341 
1342     platform_set_drvdata(pdev, s3cdma);
1343     dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
1344          pdata->num_phy_channels);
1345 
1346     return 0;
1347 
1348 err_slave_reg:
1349     dma_async_device_unregister(&s3cdma->memcpy);
1350 err_memcpy_reg:
1351     s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
1352 err_slave:
1353     s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
1354 err_memcpy:
1355     if (sdata->has_clocks)
1356         for (i = 0; i < pdata->num_phy_channels; i++) {
1357             struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
1358             if (phy->valid)
1359                 clk_unprepare(phy->clk);
1360         }
1361 
1362     return ret;
1363 }
1364 
1365 static void s3c24xx_dma_free_irq(struct platform_device *pdev,
1366                 struct s3c24xx_dma_engine *s3cdma)
1367 {
1368     int i;
1369 
1370     for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
1371         struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
1372 
1373         devm_free_irq(&pdev->dev, phy->irq, phy);
1374     }
1375 }
1376 
1377 static int s3c24xx_dma_remove(struct platform_device *pdev)
1378 {
1379     const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
1380     struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
1381     struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
1382     int i;
1383 
1384     dma_async_device_unregister(&s3cdma->slave);
1385     dma_async_device_unregister(&s3cdma->memcpy);
1386 
1387     s3c24xx_dma_free_irq(pdev, s3cdma);
1388 
1389     s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
1390     s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
1391 
1392     if (sdata->has_clocks)
1393         for (i = 0; i < pdata->num_phy_channels; i++) {
1394             struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
1395             if (phy->valid)
1396                 clk_unprepare(phy->clk);
1397         }
1398 
1399     return 0;
1400 }
1401 
/*
 * Platform driver glue; the actual device names bound to are the
 * per-SoC entries in s3c24xx_dma_driver_ids ("s3c2410-dma" etc.).
 */
static struct platform_driver s3c24xx_dma_driver = {
    .driver     = {
        .name   = "s3c24xx-dma",
    },
    .id_table   = s3c24xx_dma_driver_ids,
    .probe      = s3c24xx_dma_probe,
    .remove     = s3c24xx_dma_remove,
};

/* Generates the module init/exit that (un)register the driver. */
module_platform_driver(s3c24xx_dma_driver);
1412 
1413 bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
1414 {
1415     struct s3c24xx_dma_chan *s3cchan;
1416 
1417     if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
1418         return false;
1419 
1420     s3cchan = to_s3c24xx_dma_chan(chan);
1421 
1422     return s3cchan->id == (uintptr_t)param;
1423 }
1424 EXPORT_SYMBOL(s3c24xx_dma_filter);
1425 
1426 MODULE_DESCRIPTION("S3C24XX DMA Driver");
1427 MODULE_AUTHOR("Heiko Stuebner");
1428 MODULE_LICENSE("GPL v2");