Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * BRIEF MODULE DESCRIPTION
0003  *  Defines for using and allocating DMA channels on the Alchemy
0004  *      Au1x00 MIPS processors.
0005  *
0006  * Copyright 2000, 2008 MontaVista Software Inc.
0007  * Author: MontaVista Software, Inc. <source@mvista.com>
0008  *
0009  *  This program is free software; you can redistribute  it and/or modify it
0010  *  under  the terms of  the GNU General  Public License as published by the
0011  *  Free Software Foundation;  either version 2 of the  License, or (at your
0012  *  option) any later version.
0013  *
0014  *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
0015  *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
0016  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
0017  *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
0018  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
0019  *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
0020  *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
0021  *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
0022  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
0023  *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0024  *
0025  *  You should have received a copy of the  GNU General Public License along
0026  *  with this program; if not, write  to the Free Software Foundation, Inc.,
0027  *  675 Mass Ave, Cambridge, MA 02139, USA.
0028  *
0029  */
0030 #ifndef __ASM_AU1000_DMA_H
0031 #define __ASM_AU1000_DMA_H
0032 
0033 #include <linux/io.h>       /* need byte IO */
0034 #include <linux/spinlock.h> /* And spinlocks */
0035 #include <linux/delay.h>
0036 
#define NUM_AU1000_DMA_CHANNELS 8

/* DMA Channel Register Offsets */
#define DMA_MODE_SET        0x00000000
#define DMA_MODE_READ       DMA_MODE_SET	/* reads use the SET offset */
#define DMA_MODE_CLEAR      0x00000004
/* DMA Mode register bits follow */
#define DMA_DAH_MASK        (0x0f << 20)
#define DMA_DID_BIT     16			/* device ID field (see init_dma()) */
#define DMA_DID_MASK        (0x0f << DMA_DID_BIT)
#define DMA_DS          (1 << 15)		/* device select: 2nd bank of device IDs */
#define DMA_BE          (1 << 13)		/* big-endian */
#define DMA_DR          (1 << 12)		/* transfer direction */
#define DMA_TS8         (1 << 11)		/* transfer size */
#define DMA_DW_BIT      9			/* device FIFO width field */
#define DMA_DW_MASK     (0x03 << DMA_DW_BIT)
#define DMA_DW8         (0 << DMA_DW_BIT)	/* 8-bit FIFO */
#define DMA_DW16        (1 << DMA_DW_BIT)	/* 16-bit FIFO */
#define DMA_DW32        (2 << DMA_DW_BIT)	/* 32-bit FIFO */
#define DMA_NC          (1 << 8)		/* non-coherent */
#define DMA_IE          (1 << 7)		/* interrupt enable (set when chan has an IRQ) */
#define DMA_HALT        (1 << 6)		/* status: channel halted */
#define DMA_GO          (1 << 5)		/* start the channel */
#define DMA_AB          (1 << 4)		/* status: which buffer is active */
#define DMA_D1          (1 << 3)		/* buffer 1 done */
#define DMA_BE1         (1 << 2)		/* buffer 1 enable */
#define DMA_D0          (1 << 1)		/* buffer 0 done */
#define DMA_BE0         (1 << 0)		/* buffer 0 enable */

/* Remaining per-channel register offsets */
#define DMA_PERIPHERAL_ADDR 0x00000008
#define DMA_BUFFER0_START   0x0000000C
#define DMA_BUFFER1_START   0x00000014
#define DMA_BUFFER0_COUNT   0x00000010
#define DMA_BUFFER1_COUNT   0x00000018
#define DMA_BAH_BIT 16
#define DMA_BAH_MASK    (0x0f << DMA_BAH_BIT)
#define DMA_COUNT_BIT   0
#define DMA_COUNT_MASK  (0xffff << DMA_COUNT_BIT)	/* 16-bit transfer count */
0075 
/* DMA Device IDs follow */
/* These values are written into the mode register's DID field
 * (see init_dma()); the second bank below is selected via DMA_DS. */
enum {
	DMA_ID_UART0_TX = 0,
	DMA_ID_UART0_RX,
	DMA_ID_GP04,
	DMA_ID_GP05,
	DMA_ID_AC97C_TX,
	DMA_ID_AC97C_RX,
	DMA_ID_UART3_TX,
	DMA_ID_UART3_RX,
	DMA_ID_USBDEV_EP0_RX,
	DMA_ID_USBDEV_EP0_TX,
	DMA_ID_USBDEV_EP2_TX,
	DMA_ID_USBDEV_EP3_TX,
	DMA_ID_USBDEV_EP4_RX,
	DMA_ID_USBDEV_EP5_RX,
	DMA_ID_I2S_TX,
	DMA_ID_I2S_RX,
	DMA_NUM_DEV		/* number of device IDs in this bank */
};
0096 
/* DMA Device ID's for 2nd bank (AU1100) follow */
/* Selected by setting DMA_DS in the channel mode. */
enum {
	DMA_ID_SD0_TX = 0,
	DMA_ID_SD0_RX,
	DMA_ID_SD1_TX,
	DMA_ID_SD1_RX,
	DMA_NUM_DEV_BANK2	/* number of device IDs in bank 2 */
};
0105 
/* Per-channel bookkeeping for the Au1000 DMA controller. */
struct dma_chan {
	int dev_id;		/* this channel is allocated if >= 0, */
				/* free otherwise */
	void __iomem *io;	/* base of this channel's register block */
	const char *dev_str;	/* owner name (see request_au1000_dma()) */
	int irq;		/* buffer-done IRQ; nonzero enables DMA_IE in init_dma() */
	void *irq_dev;		/* presumably the irq_dev_id cookie for the handler — verify in dma.c */
	unsigned int fifo_addr;	/* device FIFO address, programmed by init_dma() */
	unsigned int mode;	/* cached copy of the channel's mode bits */
};
0116 
/* These are in arch/mips/au1000/common/dma.c */
extern struct dma_chan au1000_dma_table[];	/* NUM_AU1000_DMA_CHANNELS entries */
extern int request_au1000_dma(int dev_id,
			      const char *dev_str,
			      irq_handler_t irqhandler,
			      unsigned long irqflags,
			      void *irq_dev_id);
extern void free_au1000_dma(unsigned int dmanr);
extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
				int length, int *eof, void *data);
extern void dump_au1000_dma_channel(unsigned int dmanr);
extern spinlock_t au1000_dma_spin_lock;		/* guards claim_dma_lock()/release_dma_lock() sections */
0129 
0130 static inline struct dma_chan *get_dma_chan(unsigned int dmanr)
0131 {
0132     if (dmanr >= NUM_AU1000_DMA_CHANNELS ||
0133         au1000_dma_table[dmanr].dev_id < 0)
0134         return NULL;
0135     return &au1000_dma_table[dmanr];
0136 }
0137 
0138 static inline unsigned long claim_dma_lock(void)
0139 {
0140     unsigned long flags;
0141 
0142     spin_lock_irqsave(&au1000_dma_spin_lock, flags);
0143     return flags;
0144 }
0145 
/* Release the global DMA lock taken by claim_dma_lock(), restoring IRQ state. */
static inline void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&au1000_dma_spin_lock, flags);
}
0150 
0151 /*
0152  * Set the DMA buffer enable bits in the mode register.
0153  */
0154 static inline void enable_dma_buffer0(unsigned int dmanr)
0155 {
0156     struct dma_chan *chan = get_dma_chan(dmanr);
0157 
0158     if (!chan)
0159         return;
0160     __raw_writel(DMA_BE0, chan->io + DMA_MODE_SET);
0161 }
0162 
0163 static inline void enable_dma_buffer1(unsigned int dmanr)
0164 {
0165     struct dma_chan *chan = get_dma_chan(dmanr);
0166 
0167     if (!chan)
0168         return;
0169     __raw_writel(DMA_BE1, chan->io + DMA_MODE_SET);
0170 }
0171 static inline void enable_dma_buffers(unsigned int dmanr)
0172 {
0173     struct dma_chan *chan = get_dma_chan(dmanr);
0174 
0175     if (!chan)
0176         return;
0177     __raw_writel(DMA_BE0 | DMA_BE1, chan->io + DMA_MODE_SET);
0178 }
0179 
0180 static inline void start_dma(unsigned int dmanr)
0181 {
0182     struct dma_chan *chan = get_dma_chan(dmanr);
0183 
0184     if (!chan)
0185         return;
0186     __raw_writel(DMA_GO, chan->io + DMA_MODE_SET);
0187 }
0188 
0189 #define DMA_HALT_POLL 0x5000
0190 
0191 static inline void halt_dma(unsigned int dmanr)
0192 {
0193     struct dma_chan *chan = get_dma_chan(dmanr);
0194     int i;
0195 
0196     if (!chan)
0197         return;
0198     __raw_writel(DMA_GO, chan->io + DMA_MODE_CLEAR);
0199 
0200     /* Poll the halt bit */
0201     for (i = 0; i < DMA_HALT_POLL; i++)
0202         if (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT)
0203             break;
0204     if (i == DMA_HALT_POLL)
0205         printk(KERN_INFO "halt_dma: HALT poll expired!\n");
0206 }
0207 
/*
 * Halt a channel and then wipe its mode register.
 */
static inline void disable_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;

	halt_dma(dmanr);

	/* Now we can disable the buffers */
	/* NOTE(review): writing ~DMA_GO to MODE_CLEAR clears every mode
	 * bit EXCEPT the GO bit; GO itself was already cleared inside
	 * halt_dma() above, so the net effect is a fully cleared mode. */
	__raw_writel(~DMA_GO, chan->io + DMA_MODE_CLEAR);
}
0220 
0221 static inline int dma_halted(unsigned int dmanr)
0222 {
0223     struct dma_chan *chan = get_dma_chan(dmanr);
0224 
0225     if (!chan)
0226         return 1;
0227     return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT) ? 1 : 0;
0228 }
0229 
/* Initialize a DMA channel: halt it, program the device FIFO address,
 * then install the full mode word.  Register write order matters. */
static inline void init_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);
	u32 mode;

	if (!chan)
		return;

	/* Make sure the channel is halted before reprogramming it. */
	disable_dma(dmanr);

	/* Set device FIFO address */
	__raw_writel(CPHYSADDR(chan->fifo_addr), chan->io + DMA_PERIPHERAL_ADDR);

	/* Merge the device ID into the cached mode; enable the interrupt
	 * only when an IRQ line was assigned to this channel. */
	mode = chan->mode | (chan->dev_id << DMA_DID_BIT);
	if (chan->irq)
		mode |= DMA_IE;

	/* Clear every bit not in 'mode', then set the ones that are. */
	__raw_writel(~mode, chan->io + DMA_MODE_CLEAR);
	__raw_writel(mode,   chan->io + DMA_MODE_SET);
}
0251 
0252 /*
0253  * Set mode for a specific DMA channel
0254  */
0255 static inline void set_dma_mode(unsigned int dmanr, unsigned int mode)
0256 {
0257     struct dma_chan *chan = get_dma_chan(dmanr);
0258 
0259     if (!chan)
0260         return;
0261     /*
0262      * set_dma_mode is only allowed to change endianess, direction,
0263      * transfer size, device FIFO width, and coherency settings.
0264      * Make sure anything else is masked off.
0265      */
0266     mode &= (DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
0267     chan->mode &= ~(DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
0268     chan->mode |= mode;
0269 }
0270 
0271 static inline unsigned int get_dma_mode(unsigned int dmanr)
0272 {
0273     struct dma_chan *chan = get_dma_chan(dmanr);
0274 
0275     if (!chan)
0276         return 0;
0277     return chan->mode;
0278 }
0279 
0280 static inline int get_dma_active_buffer(unsigned int dmanr)
0281 {
0282     struct dma_chan *chan = get_dma_chan(dmanr);
0283 
0284     if (!chan)
0285         return -1;
0286     return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 1 : 0;
0287 }
0288 
0289 /*
0290  * Set the device FIFO address for a specific DMA channel - only
0291  * applicable to GPO4 and GPO5. All the other devices have fixed
0292  * FIFO addresses.
0293  */
0294 static inline void set_dma_fifo_addr(unsigned int dmanr, unsigned int a)
0295 {
0296     struct dma_chan *chan = get_dma_chan(dmanr);
0297 
0298     if (!chan)
0299         return;
0300 
0301     if (chan->mode & DMA_DS)    /* second bank of device IDs */
0302         return;
0303 
0304     if (chan->dev_id != DMA_ID_GP04 && chan->dev_id != DMA_ID_GP05)
0305         return;
0306 
0307     __raw_writel(CPHYSADDR(a), chan->io + DMA_PERIPHERAL_ADDR);
0308 }
0309 
0310 /*
0311  * Clear the DMA buffer done bits in the mode register.
0312  */
0313 static inline void clear_dma_done0(unsigned int dmanr)
0314 {
0315     struct dma_chan *chan = get_dma_chan(dmanr);
0316 
0317     if (!chan)
0318         return;
0319     __raw_writel(DMA_D0, chan->io + DMA_MODE_CLEAR);
0320 }
0321 
0322 static inline void clear_dma_done1(unsigned int dmanr)
0323 {
0324     struct dma_chan *chan = get_dma_chan(dmanr);
0325 
0326     if (!chan)
0327         return;
0328     __raw_writel(DMA_D1, chan->io + DMA_MODE_CLEAR);
0329 }
0330 
/*
 * This does nothing - not applicable to Au1000 DMA.
 * (Presumably kept so callers of the generic ISA-style DMA API still
 * compile — TODO confirm against users of this header.)
 */
static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
}
0337 
0338 /*
0339  * Set Buffer 0 transfer address for specific DMA channel.
0340  */
0341 static inline void set_dma_addr0(unsigned int dmanr, unsigned int a)
0342 {
0343     struct dma_chan *chan = get_dma_chan(dmanr);
0344 
0345     if (!chan)
0346         return;
0347     __raw_writel(a, chan->io + DMA_BUFFER0_START);
0348 }
0349 
0350 /*
0351  * Set Buffer 1 transfer address for specific DMA channel.
0352  */
0353 static inline void set_dma_addr1(unsigned int dmanr, unsigned int a)
0354 {
0355     struct dma_chan *chan = get_dma_chan(dmanr);
0356 
0357     if (!chan)
0358         return;
0359     __raw_writel(a, chan->io + DMA_BUFFER1_START);
0360 }
0361 
0362 
0363 /*
0364  * Set Buffer 0 transfer size (max 64k) for a specific DMA channel.
0365  */
0366 static inline void set_dma_count0(unsigned int dmanr, unsigned int count)
0367 {
0368     struct dma_chan *chan = get_dma_chan(dmanr);
0369 
0370     if (!chan)
0371         return;
0372     count &= DMA_COUNT_MASK;
0373     __raw_writel(count, chan->io + DMA_BUFFER0_COUNT);
0374 }
0375 
0376 /*
0377  * Set Buffer 1 transfer size (max 64k) for a specific DMA channel.
0378  */
0379 static inline void set_dma_count1(unsigned int dmanr, unsigned int count)
0380 {
0381     struct dma_chan *chan = get_dma_chan(dmanr);
0382 
0383     if (!chan)
0384         return;
0385     count &= DMA_COUNT_MASK;
0386     __raw_writel(count, chan->io + DMA_BUFFER1_COUNT);
0387 }
0388 
0389 /*
0390  * Set both buffer transfer sizes (max 64k) for a specific DMA channel.
0391  */
0392 static inline void set_dma_count(unsigned int dmanr, unsigned int count)
0393 {
0394     struct dma_chan *chan = get_dma_chan(dmanr);
0395 
0396     if (!chan)
0397         return;
0398     count &= DMA_COUNT_MASK;
0399     __raw_writel(count, chan->io + DMA_BUFFER0_COUNT);
0400     __raw_writel(count, chan->io + DMA_BUFFER1_COUNT);
0401 }
0402 
/*
 * Returns the raw DMA_D0/DMA_D1 "buffer done" bits from the mode
 * register; 0 when neither bit is set or the channel is unallocated.
 * NOTE(review): an earlier comment claimed -1 was returned when
 * neither or both done bits were set — the code has no such path.
 */
static inline unsigned int get_dma_buffer_done(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 0;
	return __raw_readl(chan->io + DMA_MODE_READ) & (DMA_D0 | DMA_D1);
}
0415 
0416 
0417 /*
0418  * Returns the DMA channel's Buffer Done IRQ number.
0419  */
0420 static inline int get_dma_done_irq(unsigned int dmanr)
0421 {
0422     struct dma_chan *chan = get_dma_chan(dmanr);
0423 
0424     if (!chan)
0425         return -1;
0426     return chan->irq;
0427 }
0428 
0429 /*
0430  * Get DMA residue count. Returns the number of _bytes_ left to transfer.
0431  */
0432 static inline int get_dma_residue(unsigned int dmanr)
0433 {
0434     int curBufCntReg, count;
0435     struct dma_chan *chan = get_dma_chan(dmanr);
0436 
0437     if (!chan)
0438         return 0;
0439 
0440     curBufCntReg = (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ?
0441         DMA_BUFFER1_COUNT : DMA_BUFFER0_COUNT;
0442 
0443     count = __raw_readl(chan->io + curBufCntReg) & DMA_COUNT_MASK;
0444 
0445     if ((chan->mode & DMA_DW_MASK) == DMA_DW16)
0446         count <<= 1;
0447     else if ((chan->mode & DMA_DW_MASK) == DMA_DW32)
0448         count <<= 2;
0449 
0450     return count;
0451 }
0452 
0453 #endif /* __ASM_AU1000_DMA_H */