/* SPDX-License-Identifier: GPL-2.0
 *
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c and headers
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#ifndef SHDMA_BASE_H
#define SHDMA_BASE_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>

0021 /**
0022  * shdma_pm_state - DMA channel PM state
0023  * SHDMA_PM_ESTABLISHED:    either idle or during data transfer
0024  * SHDMA_PM_BUSY:       during the transfer preparation, when we have to
0025  *              drop the lock temporarily
0026  * SHDMA_PM_PENDING:    transfers pending
0027  */
0028 enum shdma_pm_state {
0029     SHDMA_PM_ESTABLISHED,
0030     SHDMA_PM_BUSY,
0031     SHDMA_PM_PENDING,
0032 };
0033 
0034 struct device;
0035 
0036 /*
0037  * Drivers, using this library are expected to embed struct shdma_dev,
0038  * struct shdma_chan, struct shdma_desc, and struct shdma_slave
0039  * in their respective device, channel, descriptor and slave objects.
0040  */
0041 
0042 struct shdma_slave {
0043     int slave_id;
0044 };
0045 
0046 struct shdma_desc {
0047     struct list_head node;
0048     struct dma_async_tx_descriptor async_tx;
0049     enum dma_transfer_direction direction;
0050     size_t partial;
0051     dma_cookie_t cookie;
0052     int chunks;
0053     int mark;
0054     bool cyclic;            /* used as cyclic transfer */
0055 };
0056 
0057 struct shdma_chan {
0058     spinlock_t chan_lock;       /* Channel operation lock */
0059     struct list_head ld_queue;  /* Link descriptors queue */
0060     struct list_head ld_free;   /* Free link descriptors */
0061     struct dma_chan dma_chan;   /* DMA channel */
0062     struct device *dev;     /* Channel device */
0063     void *desc;         /* buffer for descriptor array */
0064     int desc_num;           /* desc count */
0065     size_t max_xfer_len;        /* max transfer length */
0066     int id;             /* Raw id of this channel */
0067     int irq;            /* Channel IRQ */
0068     int slave_id;           /* Client ID for slave DMA */
0069     int real_slave_id;      /* argument passed to filter function */
0070     int hw_req;         /* DMA request line for slave DMA - same
0071                      * as MID/RID, used with DT */
0072     enum shdma_pm_state pm_state;
0073 };
0074 
0075 /**
0076  * struct shdma_ops - simple DMA driver operations
0077  * desc_completed:  return true, if this is the descriptor, that just has
0078  *          completed (atomic)
0079  * halt_channel:    stop DMA channel operation (atomic)
0080  * channel_busy:    return true, if the channel is busy (atomic)
0081  * slave_addr:      return slave DMA address
0082  * desc_setup:      set up the hardware specific descriptor portion (atomic)
0083  * set_slave:       bind channel to a slave
0084  * setup_xfer:      configure channel hardware for operation (atomic)
0085  * start_xfer:      start the DMA transfer (atomic)
0086  * embedded_desc:   return Nth struct shdma_desc pointer from the
0087  *          descriptor array
0088  * chan_irq:        process channel IRQ, return true if a transfer has
0089  *          completed (atomic)
0090  */
0091 struct shdma_ops {
0092     bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
0093     void (*halt_channel)(struct shdma_chan *);
0094     bool (*channel_busy)(struct shdma_chan *);
0095     dma_addr_t (*slave_addr)(struct shdma_chan *);
0096     int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
0097               dma_addr_t, dma_addr_t, size_t *);
0098     int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
0099     void (*setup_xfer)(struct shdma_chan *, int);
0100     void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
0101     struct shdma_desc *(*embedded_desc)(void *, int);
0102     bool (*chan_irq)(struct shdma_chan *, int);
0103     size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
0104 };
0105 
0106 struct shdma_dev {
0107     struct dma_device dma_dev;
0108     struct shdma_chan **schan;
0109     const struct shdma_ops *ops;
0110     size_t desc_size;
0111 };
0112 
0113 #define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
0114                 i < (d)->dma_dev.chancnt; c = (d)->schan[++i])
0115 
0116 int shdma_request_irq(struct shdma_chan *, int,
0117                unsigned long, const char *);
0118 bool shdma_reset(struct shdma_dev *sdev);
0119 void shdma_chan_probe(struct shdma_dev *sdev,
0120                struct shdma_chan *schan, int id);
0121 void shdma_chan_remove(struct shdma_chan *schan);
0122 int shdma_init(struct device *dev, struct shdma_dev *sdev,
0123             int chan_num);
0124 void shdma_cleanup(struct shdma_dev *sdev);
0125 #if IS_ENABLED(CONFIG_SH_DMAE_BASE)
0126 bool shdma_chan_filter(struct dma_chan *chan, void *arg);
0127 #else
0128 static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
0129 {
0130     return false;
0131 }
0132 #endif
0133 
0134 #endif