// SPDX-License-Identifier: GPL-2.0-only
/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR     0x34
#define TIMBDMA_32BIT_ADDR  0x01

#define TIMBDMA_ISR     0x080000
#define TIMBDMA_IPR     0x080004
#define TIMBDMA_IER     0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET     0x40
#define TIMBDMA_INSTANCE_TX_OFFSET  0x18
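/* Channel i (even = RX, odd = TX) is thus reached at
 * membase + (i / 2) * TIMBDMA_INSTANCE_OFFSET, with TX instances shifted
 * by an extra TIMBDMA_INSTANCE_TX_OFFSET; see the per-channel membase
 * setup in td_probe().
 */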

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR    0x00
#define TIMBDMA_OFFS_RX_DLAR    0x04
#define TIMBDMA_OFFS_RX_LR  0x0C
#define TIMBDMA_OFFS_RX_BLR 0x10
#define TIMBDMA_OFFS_RX_ER  0x14
#define TIMBDMA_RX_EN       0x01
/* Bytes per row, a video-specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR    0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR    0x00
#define TIMBDMA_OFFS_TX_DLAR    0x04
#define TIMBDMA_OFFS_TX_BLR 0x0C
#define TIMBDMA_OFFS_TX_LR  0x14


#define TIMB_DMA_DESC_SIZE  8

struct timb_dma_desc {
    struct list_head        desc_node;
    struct dma_async_tx_descriptor  txd;
    u8              *desc_list;
    unsigned int            desc_list_len;
    bool                interrupt;
};

struct timb_dma_chan {
    struct dma_chan     chan;
    void __iomem        *membase;
    spinlock_t      lock; /* Used to protect data structures,
                    especially the lists and descriptors,
                    from races between the tasklet and calls
                    from above */
    bool            ongoing;
    struct list_head    active_list;
    struct list_head    queue;
    struct list_head    free_list;
    unsigned int        bytes_per_line;
    enum dma_transfer_direction direction;
    unsigned int        descs; /* Descriptors to allocate */
    unsigned int        desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
    struct dma_device   dma;
    void __iomem        *membase;
    struct tasklet_struct   tasklet;
    struct timb_dma_chan    channels[];
};

static struct device *chan2dev(struct dma_chan *chan)
{
    return &chan->dev->device;
}
static struct device *chan2dmadev(struct dma_chan *chan)
{
    return chan2dev(chan)->parent->parent;
}

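/*
 * The channels[] flexible array lies directly after struct timb_dma, so
 * the owning struct timb_dma can be recovered from a channel pointer by
 * stepping back over the preceding channels and the timb_dma header.
 */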
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
    int id = td_chan->chan.chan_id;
    return (struct timb_dma *)((u8 *)td_chan -
        id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
    int id = td_chan->chan.chan_id;
    struct timb_dma *td = tdchantotd(td_chan);
    u32 ier;

    /* enable interrupt for this channel */
    ier = ioread32(td->membase + TIMBDMA_IER);
    ier |= 1 << id;
    dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
        ier);
    iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
    int id = td_chan->chan.chan_id;
    struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
        id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
    u32 isr;
    bool done = false;

    dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

    isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
    if (isr) {
        iowrite32(isr, td->membase + TIMBDMA_ISR);
        done = true;
    }

    return done;
}

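/*
 * Fill one TIMB_DMA_DESC_SIZE (8 byte) hardware descriptor element:
 * byte 0 holds the control bits (0x21 plus 0x02 to mark the last element,
 * labelled "tran, valid" below), byte 1 is zero, bytes 2-3 carry the
 * transfer length and bytes 4-7 the bus address, least significant
 * byte first.
 */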
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
    struct scatterlist *sg, bool last)
{
    if (sg_dma_len(sg) > USHRT_MAX) {
        dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
        return -EINVAL;
    }

    /* length must be word aligned */
    if (sg_dma_len(sg) % sizeof(u32)) {
        dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
            sg_dma_len(sg));
        return -EINVAL;
    }

    dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
        dma_desc, (unsigned long long)sg_dma_address(sg));

    dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
    dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
    dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
    dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

    dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
    dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

    dma_desc[1] = 0x00;
    dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

    return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
    struct timb_dma_desc *td_desc;

    if (td_chan->ongoing) {
        dev_err(chan2dev(&td_chan->chan),
            "Transfer already ongoing\n");
        return;
    }

    td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
        desc_node);

    dev_dbg(chan2dev(&td_chan->chan),
        "td_chan: %p, chan: %d, membase: %p\n",
        td_chan, td_chan->chan.chan_id, td_chan->membase);

    if (td_chan->direction == DMA_DEV_TO_MEM) {

        /* descriptor address */
        iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
        iowrite32(td_desc->txd.phys, td_chan->membase +
            TIMBDMA_OFFS_RX_DLAR);
        /* Bytes per line */
        iowrite32(td_chan->bytes_per_line, td_chan->membase +
            TIMBDMA_OFFS_RX_BPRR);
        /* enable RX */
        iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
    } else {
        /* address high */
        iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
        iowrite32(td_desc->txd.phys, td_chan->membase +
            TIMBDMA_OFFS_TX_DLAR);
    }

    td_chan->ongoing = true;

    if (td_desc->interrupt)
        __td_enable_chan_irq(td_chan);
}

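/*
 * Complete the descriptor at the head of active_list: stop RX if needed,
 * mark the cookie complete, move the descriptor to the free list and
 * invoke the client callback. Called with the channel lock held.
 */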
static void __td_finish(struct timb_dma_chan *td_chan)
{
    struct dmaengine_desc_callback  cb;
    struct dma_async_tx_descriptor  *txd;
    struct timb_dma_desc        *td_desc;

    /* can happen if the descriptor is canceled */
    if (list_empty(&td_chan->active_list))
        return;

    td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
        desc_node);
    txd = &td_desc->txd;

    dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
        txd->cookie);

    /* make sure to stop the transfer */
    if (td_chan->direction == DMA_DEV_TO_MEM)
        iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
    else
        iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
    dma_cookie_complete(txd);
    td_chan->ongoing = false;

    dmaengine_desc_get_callback(txd, &cb);

    list_move(&td_desc->desc_node, &td_chan->free_list);

    dma_descriptor_unmap(txd);
    /*
     * The API requires that no submissions are done from a
     * callback, so we don't need to drop the lock here
     */
    dmaengine_desc_callback_invoke(&cb, NULL);
}

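/*
 * Build the IER mask: one bit per channel that has an ongoing transfer
 * whose active descriptor asked for an interrupt.
 */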
static u32 __td_ier_mask(struct timb_dma *td)
{
    int i;
    u32 ret = 0;

    for (i = 0; i < td->dma.chancnt; i++) {
        struct timb_dma_chan *td_chan = td->channels + i;
        if (td_chan->ongoing) {
            struct timb_dma_desc *td_desc =
                list_entry(td_chan->active_list.next,
                struct timb_dma_desc, desc_node);
            if (td_desc->interrupt)
                ret |= 1 << i;
        }
    }

    return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
    struct timb_dma_desc *td_desc;

    BUG_ON(list_empty(&td_chan->queue));
    BUG_ON(td_chan->ongoing);

    td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
        desc_node);

    dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
        __func__, td_desc->txd.cookie);

    list_move(&td_desc->desc_node, &td_chan->active_list);
    __td_start_dma(td_chan);
}

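/*
 * dmaengine tx_submit hook: assign a cookie and either start the transfer
 * right away (idle channel) or put it on the queue for later.
 */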
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
    struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
        txd);
    struct timb_dma_chan *td_chan = container_of(txd->chan,
        struct timb_dma_chan, chan);
    dma_cookie_t cookie;

    spin_lock_bh(&td_chan->lock);
    cookie = dma_cookie_assign(txd);

    if (list_empty(&td_chan->active_list)) {
        dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
            txd->cookie);
        list_add_tail(&td_desc->desc_node, &td_chan->active_list);
        __td_start_dma(td_chan);
    } else {
        dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
            txd->cookie);

        list_add_tail(&td_desc->desc_node, &td_chan->queue);
    }

    spin_unlock_bh(&td_chan->lock);

    return cookie;
}

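/*
 * Allocate one software descriptor plus its desc_elems * TIMB_DMA_DESC_SIZE
 * byte hardware descriptor list, and DMA-map the list so the engine can
 * fetch it (DMA_TO_DEVICE).
 */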
static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
    struct dma_chan *chan = &td_chan->chan;
    struct timb_dma_desc *td_desc;
    int err;

    td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
    if (!td_desc)
        goto out;

    td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

    td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
    if (!td_desc->desc_list)
        goto err;

    dma_async_tx_descriptor_init(&td_desc->txd, chan);
    td_desc->txd.tx_submit = td_tx_submit;
    td_desc->txd.flags = DMA_CTRL_ACK;

    td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
        td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

    err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
    if (err) {
        dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
        goto err;
    }

    return td_desc;
err:
    kfree(td_desc->desc_list);
    kfree(td_desc);
out:
    return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
    dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
    dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
        td_desc->desc_list_len, DMA_TO_DEVICE);

    kfree(td_desc->desc_list);
    kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
    struct timb_dma_desc *td_desc)
{
    dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

    spin_lock_bh(&td_chan->lock);
    list_add(&td_desc->desc_node, &td_chan->free_list);
    spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
    struct timb_dma_desc *td_desc, *_td_desc;
    struct timb_dma_desc *ret = NULL;

    spin_lock_bh(&td_chan->lock);
    list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
        desc_node) {
        if (async_tx_test_ack(&td_desc->txd)) {
            list_del(&td_desc->desc_node);
            ret = td_desc;
            break;
        }
        dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
            td_desc);
    }
    spin_unlock_bh(&td_chan->lock);

    return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
    struct timb_dma_chan *td_chan =
        container_of(chan, struct timb_dma_chan, chan);
    int i;

    dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

    BUG_ON(!list_empty(&td_chan->free_list));
    for (i = 0; i < td_chan->descs; i++) {
        struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
        if (!td_desc) {
            if (i)
                break;
            else {
                dev_err(chan2dev(chan),
                    "Couldn't allocate any descriptors\n");
                return -ENOMEM;
            }
        }

        td_desc_put(td_chan, td_desc);
    }

    spin_lock_bh(&td_chan->lock);
    dma_cookie_init(chan);
    spin_unlock_bh(&td_chan->lock);

    return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
    struct timb_dma_chan *td_chan =
        container_of(chan, struct timb_dma_chan, chan);
    struct timb_dma_desc *td_desc, *_td_desc;
    LIST_HEAD(list);

    dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

    /* check that all descriptors are free */
    BUG_ON(!list_empty(&td_chan->active_list));
    BUG_ON(!list_empty(&td_chan->queue));

    spin_lock_bh(&td_chan->lock);
    list_splice_init(&td_chan->free_list, &list);
    spin_unlock_bh(&td_chan->lock);

    list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
        dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
            td_desc);
        td_free_desc(td_desc);
    }
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                    struct dma_tx_state *txstate)
{
    enum dma_status ret;

    dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

    ret = dma_cookie_status(chan, cookie, txstate);

    dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n",  __func__, ret);

    return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
    struct timb_dma_chan *td_chan =
        container_of(chan, struct timb_dma_chan, chan);

    dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
    spin_lock_bh(&td_chan->lock);

    if (!list_empty(&td_chan->active_list))
        /* transfer ongoing */
        if (__td_dma_done_ack(td_chan))
            __td_finish(td_chan);

    if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
        __td_start_next(td_chan);

    spin_unlock_bh(&td_chan->lock);
}

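/*
 * Build the hardware descriptor list for a slave transfer: one 8-byte
 * element per scatterlist entry, then sync the list buffer for the device
 * before handing the descriptor back to the caller.
 */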
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
    struct scatterlist *sgl, unsigned int sg_len,
    enum dma_transfer_direction direction, unsigned long flags,
    void *context)
{
    struct timb_dma_chan *td_chan =
        container_of(chan, struct timb_dma_chan, chan);
    struct timb_dma_desc *td_desc;
    struct scatterlist *sg;
    unsigned int i;
    unsigned int desc_usage = 0;

    if (!sgl || !sg_len) {
        dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
        return NULL;
    }

    /* even channels are for RX, odd for TX */
    if (td_chan->direction != direction) {
        dev_err(chan2dev(chan),
            "Requesting channel in wrong direction\n");
        return NULL;
    }

    td_desc = td_desc_get(td_chan);
    if (!td_desc) {
        dev_err(chan2dev(chan), "Not enough descriptors available\n");
        return NULL;
    }

    td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

    for_each_sg(sgl, sg, sg_len, i) {
        int err;
        if (desc_usage > td_desc->desc_list_len) {
            dev_err(chan2dev(chan), "No descriptor space\n");
            /* return the descriptor to the free list on failure */
            td_desc_put(td_chan, td_desc);
            return NULL;
        }

        err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
            i == (sg_len - 1));
        if (err) {
            dev_err(chan2dev(chan), "Failed to update desc: %d\n",
                err);
            td_desc_put(td_chan, td_desc);
            return NULL;
        }
        desc_usage += TIMB_DMA_DESC_SIZE;
    }

    dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
        td_desc->desc_list_len, DMA_TO_DEVICE);

    return &td_desc->txd;
}

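/*
 * Terminate: move every queued descriptor back to the free list and
 * complete the one that is active. An ongoing TX transfer cannot be
 * stopped in hardware (see the comment in __td_finish()).
 */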
static int td_terminate_all(struct dma_chan *chan)
{
    struct timb_dma_chan *td_chan =
        container_of(chan, struct timb_dma_chan, chan);
    struct timb_dma_desc *td_desc, *_td_desc;

    dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

    /* first the easy part, put the queue into the free list */
    spin_lock_bh(&td_chan->lock);
    list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
        desc_node)
        list_move(&td_desc->desc_node, &td_chan->free_list);

    /* now tear down the running */
    __td_finish(td_chan);
    spin_unlock_bh(&td_chan->lock);

    return 0;
}

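/*
 * Bottom half: acknowledge the channel interrupts that were expected,
 * complete the finished descriptors, kick off anything queued and then
 * rewrite IER (which td_irq() cleared) from the remaining ongoing
 * transfers.
 */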
static void td_tasklet(struct tasklet_struct *t)
{
    struct timb_dma *td = from_tasklet(td, t, tasklet);
    u32 isr;
    u32 ipr;
    u32 ier;
    int i;

    isr = ioread32(td->membase + TIMBDMA_ISR);
    ipr = isr & __td_ier_mask(td);

    /* ack the interrupts */
    iowrite32(ipr, td->membase + TIMBDMA_ISR);

    for (i = 0; i < td->dma.chancnt; i++)
        if (ipr & (1 << i)) {
            struct timb_dma_chan *td_chan = td->channels + i;
            spin_lock(&td_chan->lock);
            __td_finish(td_chan);
            if (!list_empty(&td_chan->queue))
                __td_start_next(td_chan);
            spin_unlock(&td_chan->lock);
        }

    ier = __td_ier_mask(td);
    iowrite32(ier, td->membase + TIMBDMA_IER);
}


static irqreturn_t td_irq(int irq, void *devid)
{
    struct timb_dma *td = devid;
    u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

    if (ipr) {
        /* disable interrupts, will be re-enabled in tasklet */
        iowrite32(0, td->membase + TIMBDMA_IER);

        tasklet_schedule(&td->tasklet);

        return IRQ_HANDLED;
    } else
        return IRQ_NONE;
}

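/*
 * Probe: map the register window, select 32-bit addressing, clear and
 * disable all interrupts, set up the tasklet and shared IRQ, then register
 * one dmaengine channel per platform-data entry (even indices RX, odd
 * indices TX).
 */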
static int td_probe(struct platform_device *pdev)
{
    struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
    struct timb_dma *td;
    struct resource *iomem;
    int irq;
    int err;
    int i;

    if (!pdata) {
        dev_err(&pdev->dev, "No platform data\n");
        return -EINVAL;
    }

    iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!iomem)
        return -EINVAL;

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
        return irq;

    if (!request_mem_region(iomem->start, resource_size(iomem),
        DRIVER_NAME))
        return -EBUSY;

    td  = kzalloc(struct_size(td, channels, pdata->nr_channels),
              GFP_KERNEL);
    if (!td) {
        err = -ENOMEM;
        goto err_release_region;
    }

    dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

    td->membase = ioremap(iomem->start, resource_size(iomem));
    if (!td->membase) {
        dev_err(&pdev->dev, "Failed to remap I/O memory\n");
        err = -ENOMEM;
        goto err_free_mem;
    }

    /* 32bit addressing */
    iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

    /* disable and clear any interrupts */
    iowrite32(0x0, td->membase + TIMBDMA_IER);
    iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

    tasklet_setup(&td->tasklet, td_tasklet);

    err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
    if (err) {
        dev_err(&pdev->dev, "Failed to request IRQ\n");
        goto err_tasklet_kill;
    }

    td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
    td->dma.device_free_chan_resources  = td_free_chan_resources;
    td->dma.device_tx_status        = td_tx_status;
    td->dma.device_issue_pending        = td_issue_pending;

    dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
    dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
    td->dma.device_prep_slave_sg = td_prep_slave_sg;
    td->dma.device_terminate_all = td_terminate_all;

    td->dma.dev = &pdev->dev;

    INIT_LIST_HEAD(&td->dma.channels);

    for (i = 0; i < pdata->nr_channels; i++) {
        struct timb_dma_chan *td_chan = &td->channels[i];
        struct timb_dma_platform_data_channel *pchan =
            pdata->channels + i;

        /* even channels are RX, odd are TX */
        if ((i % 2) == pchan->rx) {
            dev_err(&pdev->dev, "Wrong channel configuration\n");
            err = -EINVAL;
            goto err_free_irq;
        }

        td_chan->chan.device = &td->dma;
        dma_cookie_init(&td_chan->chan);
        spin_lock_init(&td_chan->lock);
        INIT_LIST_HEAD(&td_chan->active_list);
        INIT_LIST_HEAD(&td_chan->queue);
        INIT_LIST_HEAD(&td_chan->free_list);

        td_chan->descs = pchan->descriptors;
        td_chan->desc_elems = pchan->descriptor_elements;
        td_chan->bytes_per_line = pchan->bytes_per_line;
        td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
            DMA_MEM_TO_DEV;

        td_chan->membase = td->membase +
            (i / 2) * TIMBDMA_INSTANCE_OFFSET +
            (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

        dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
            i, td_chan->membase);

        list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
    }

    err = dma_async_device_register(&td->dma);
    if (err) {
        dev_err(&pdev->dev, "Failed to register async device\n");
        goto err_free_irq;
    }

    platform_set_drvdata(pdev, td);

    dev_dbg(&pdev->dev, "Probe result: %d\n", err);
    return err;

err_free_irq:
    free_irq(irq, td);
err_tasklet_kill:
    tasklet_kill(&td->tasklet);
    iounmap(td->membase);
err_free_mem:
    kfree(td);
err_release_region:
    release_mem_region(iomem->start, resource_size(iomem));

    return err;
}

static int td_remove(struct platform_device *pdev)
{
    struct timb_dma *td = platform_get_drvdata(pdev);
    struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    int irq = platform_get_irq(pdev, 0);

    dma_async_device_unregister(&td->dma);
    free_irq(irq, td);
    tasklet_kill(&td->tasklet);
    iounmap(td->membase);
    kfree(td);
    release_mem_region(iomem->start, resource_size(iomem));

    dev_dbg(&pdev->dev, "Removed...\n");
    return 0;
}

static struct platform_driver td_driver = {
    .driver = {
        .name   = DRIVER_NAME,
    },
    .probe  = td_probe,
    .remove = td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);