Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  *
0003  * BRIEF MODULE DESCRIPTION
0004  *      The Descriptor Based DMA channel manager that first appeared
0005  *  on the Au1550.  I started with dma.c, but I think all that is
0006  *  left is this initial comment :-)
0007  *
0008  * Copyright 2004 Embedded Edge, LLC
0009  *  dan@embeddededge.com
0010  *
0011  *  This program is free software; you can redistribute  it and/or modify it
0012  *  under  the terms of  the GNU General  Public License as published by the
0013  *  Free Software Foundation;  either version 2 of the  License, or (at your
0014  *  option) any later version.
0015  *
0016  *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
0017  *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
0018  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
0019  *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
0020  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
0021  *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
0022  *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
0023  *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
0024  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
0025  *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0026  *
0027  *  You should have received a copy of the  GNU General Public License along
0028  *  with this program; if not, write  to the Free Software Foundation, Inc.,
0029  *  675 Mass Ave, Cambridge, MA 02139, USA.
0030  *
0031  */
0032 
0033 #include <linux/init.h>
0034 #include <linux/kernel.h>
0035 #include <linux/slab.h>
0036 #include <linux/spinlock.h>
0037 #include <linux/interrupt.h>
0038 #include <linux/export.h>
0039 #include <linux/syscore_ops.h>
0040 #include <asm/mach-au1x00/au1000.h>
0041 #include <asm/mach-au1x00/au1xxx_dbdma.h>
0042 
/*
 * The Descriptor Based DMA supports up to 16 channels.
 *
 * There are 32 devices defined. We keep an internal structure
 * of devices using these channels, along with additional
 * information.
 *
 * We allocate the descriptors and allow access to them through various
 * functions.  The drivers allocate the data buffers and assign them
 * to the descriptors.
 */
/* Serializes dbdev_tab INUSE-flag claims and chan_tab_ptr[] slot allocation. */
static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
0055 
/*
 * Round an address up to the next multiple of 'a' (a power of two).
 * Both parameters are fully parenthesized so that expression arguments
 * (e.g. sizeof(...) or 'n + 1') expand safely; the original '(a-1)'
 * form would mis-expand for any low-precedence argument expression.
 */
#define ALIGN_ADDR(x, a)	((((u32)(x)) + ((a) - 1)) & ~((a) - 1))
0058 
/* MMIO pointer to the global DBDMA configuration block (KSEG1, uncached). */
static dbdma_global_t *dbdma_gptr =
			(dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
/* Nonzero once init has run; chan_alloc refuses to work before that. */
static int dbdma_initialized;

/*
 * Runtime device table searched by find_dbdev_id().  Presumably
 * allocated and filled from one of the per-SoC __initdata tables below
 * at init time (init code not in view) -- TODO confirm.
 */
static dbdev_tab_t *dbdev_tab;
0064 
/* Au1550 DBDMA-capable on-chip devices (dbdev_tab_t initializers). */
static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
	/* UARTS */
	{ AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
	{ AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
	{ AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8, 0x11400000, 0, 0 },

	/* EXT DMA */
	{ AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },

	/* USB DEV */
	{ AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN,  4, 8, 0x10200000, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN,  4, 8, 0x10200010, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN,  4, 8, 0x10200014, 0, 0 },

	/* PSCs */
	{ AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,  0, 0, 0x11a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,  0, 0, 0x11b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,  0, 0, 0x10a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,  0, 0, 0x10b0001c, 0, 0 },

	{ AU1550_DSCR_CMD0_PCI_WRITE,  0, 0, 0, 0x00000000, 0, 0 },  /* PCI */
	{ AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */

	/* MAC 0 */
	{ AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

	/* MAC 1 */
	{ AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

	/* Pseudo-devices; ring_alloc() treats these as memory endpoints. */
	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};
0110 
/* Au1200 DBDMA-capable on-chip devices (dbdev_tab_t initializers). */
static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
	/* UARTs */
	{ AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
	{ AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
	{ AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8, 0x11200000, 0, 0 },

	/* EXT DMA */
	{ AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

	/* MAE / LCD */
	{ AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	/* SDMS */
	{ AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8, 0x10600004, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  4, 8, 0x10680004, 0, 0 },

	/* AES */
	{ AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
	{ AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },

	/* PSCs */
	{ AU1200_DSCR_CMD0_PSC0_TX,   DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC0_RX,   DEV_FLAGS_IN,  0, 16, 0x11a0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_TX,   DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_RX,   DEV_FLAGS_IN,  0, 16, 0x11b0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	/* CIM */
	{ AU1200_DSCR_CMD0_CIM_RXA,  DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_RXB,  DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_RXC,  DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	/* NAND */
	{ AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

	/* Pseudo-devices; ring_alloc() treats these as memory endpoints. */
	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};
0150 
/* Au1300 DBDMA-capable on-chip devices (dbdev_tab_t initializers). */
static dbdev_tab_t au1300_dbdev_tab[] __initdata = {
	/* UARTs */
	{ AU1300_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8,  0x10100004, 0, 0 },
	{ AU1300_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8,  0x10100000, 0, 0 },
	{ AU1300_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8,  0x10101004, 0, 0 },
	{ AU1300_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8,  0x10101000, 0, 0 },
	{ AU1300_DSCR_CMD0_UART2_TX, DEV_FLAGS_OUT, 0, 8,  0x10102004, 0, 0 },
	{ AU1300_DSCR_CMD0_UART2_RX, DEV_FLAGS_IN,  0, 8,  0x10102000, 0, 0 },
	{ AU1300_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8,  0x10103004, 0, 0 },
	{ AU1300_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8,  0x10103000, 0, 0 },

	/* SDMS 0/1 */
	{ AU1300_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8,  0x10600000, 0, 0 },
	{ AU1300_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8,  0x10600004, 0, 0 },
	{ AU1300_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 8, 8,  0x10601000, 0, 0 },
	{ AU1300_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  8, 8,  0x10601004, 0, 0 },

	/* AES */
	{ AU1300_DSCR_CMD0_AES_RX, DEV_FLAGS_IN ,   4, 32, 0x10300008, 0, 0 },
	{ AU1300_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT,   4, 32, 0x10300004, 0, 0 },

	/* PSCs */
	{ AU1300_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0001c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,   0, 16, 0x10a0001c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0101c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,   0, 16, 0x10a0101c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0201c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,   0, 16, 0x10a0201c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0301c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,   0, 16, 0x10a0301c, 0, 0 },

	{ AU1300_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE,   0, 0,  0x00000000, 0, 0 },
	{ AU1300_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

	/* SDMS 2 */
	{ AU1300_DSCR_CMD0_SDMS_TX2, DEV_FLAGS_OUT, 4, 8,  0x10602000, 0, 0 },
	{ AU1300_DSCR_CMD0_SDMS_RX2, DEV_FLAGS_IN,  4, 8,  0x10602004, 0, 0 },

	{ AU1300_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ AU1300_DSCR_CMD0_UDMA, DEV_FLAGS_ANYUSE,  0, 32, 0x14001810, 0, 0 },

	/* EXT DMA */
	{ AU1300_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1300_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

	/* Pseudo-devices; ring_alloc() treats these as memory endpoints. */
	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};
0194 
/* 32 predefined plus 32 custom */
#define DBDEV_TAB_SIZE		64

/* One slot per hardware channel; non-NULL while that channel is allocated. */
static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
0199 
0200 static dbdev_tab_t *find_dbdev_id(u32 id)
0201 {
0202     int i;
0203     dbdev_tab_t *p;
0204     for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
0205         p = &dbdev_tab[i];
0206         if (p->dev_id == id)
0207             return p;
0208     }
0209     return NULL;
0210 }
0211 
/*
 * Translate a descriptor's hardware "next" pointer (a physical address)
 * into the kernel virtual address of the next descriptor in the ring.
 */
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
{
	return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
}
EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
0217 
0218 u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
0219 {
0220     u32 ret = 0;
0221     dbdev_tab_t *p;
0222     static u16 new_id = 0x1000;
0223 
0224     p = find_dbdev_id(~0);
0225     if (NULL != p) {
0226         memcpy(p, dev, sizeof(dbdev_tab_t));
0227         p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
0228         ret = p->dev_id;
0229         new_id++;
0230 #if 0
0231         printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
0232                   p->dev_id, p->dev_flags, p->dev_physaddr);
0233 #endif
0234     }
0235 
0236     return ret;
0237 }
0238 EXPORT_SYMBOL(au1xxx_ddma_add_device);
0239 
0240 void au1xxx_ddma_del_device(u32 devid)
0241 {
0242     dbdev_tab_t *p = find_dbdev_id(devid);
0243 
0244     if (p != NULL) {
0245         memset(p, 0, sizeof(dbdev_tab_t));
0246         p->dev_id = ~0;
0247     }
0248 }
0249 EXPORT_SYMBOL(au1xxx_ddma_del_device);
0250 
/*
 * Allocate a channel and return a non-zero descriptor if successful.
 *
 * Claims the source and destination devices, grabs a free hardware
 * channel slot, and programs the channel configuration register.  The
 * returned "descriptor" is the address of this channel's
 * chan_tab_ptr[] slot cast to u32; every other API in this file
 * dereferences it to recover the chan_tab_t.  Returns 0 on any
 * failure (not initialized, unknown device id, device busy, no free
 * channel, or kmalloc failure).
 */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
       void (*callback)(int, void *), void *callparam)
{
	unsigned long   flags;
	u32     used, chan;
	u32     dcp;
	int     i;
	dbdev_tab_t *stp, *dtp;
	chan_tab_t  *ctp;
	au1x_dma_chan_t *cp;

	/*
	 * We do the initialization on the first channel allocation.
	 * We have to wait because of the interrupt handler initialization
	 * which can't be done successfully during board set up.
	 */
	if (!dbdma_initialized)
		return 0;

	stp = find_dbdev_id(srcid);
	if (stp == NULL)
		return 0;
	dtp = find_dbdev_id(destid);
	if (dtp == NULL)
		return 0;

	used = 0;

	/* Check to see if we can get both channels. */
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
	     (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
		/* Got source */
		stp->dev_flags |= DEV_FLAGS_INUSE;
		if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
		     (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
			/* Got destination */
			dtp->dev_flags |= DEV_FLAGS_INUSE;
		} else {
			/* Can't get dest.  Release src. */
			stp->dev_flags &= ~DEV_FLAGS_INUSE;
			used++;
		}
	} else
		used++;
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	/* 'used' non-zero means at least one endpoint was already taken. */
	if (used)
		return 0;

	/* Let's see if we can allocate a channel for it. */
	ctp = NULL;
	chan = 0;
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	for (i = 0; i < NUM_DBDMA_CHANS; i++)
		if (chan_tab_ptr[i] == NULL) {
			/*
			 * If kmalloc fails, it is caught below same
			 * as a channel not available.
			 */
			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
			chan_tab_ptr[i] = ctp;
			break;
		}
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	if (ctp != NULL) {
		memset(ctp, 0, sizeof(chan_tab_t));
		ctp->chan_index = chan = i;
		/* Per-channel register blocks are 0x100 bytes apart (KSEG1). */
		dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
		dcp += (0x0100 * chan);
		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
		cp = (au1x_dma_chan_t *)dcp;
		ctp->chan_src = stp;
		ctp->chan_dest = dtp;
		ctp->chan_callback = callback;
		ctp->chan_callparam = callparam;

		/* Initialize channel configuration. */
		i = 0;
		if (stp->dev_intlevel)
			i |= DDMA_CFG_SED;
		if (stp->dev_intpolarity)
			i |= DDMA_CFG_SP;
		if (dtp->dev_intlevel)
			i |= DDMA_CFG_DED;
		if (dtp->dev_intpolarity)
			i |= DDMA_CFG_DP;
		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
			(dtp->dev_flags & DEV_FLAGS_SYNC))
				i |= DDMA_CFG_SYNC;
		cp->ddma_cfg = i;
		wmb(); /* drain writebuffer */

		/*
		 * Return a non-zero value that can be used to find the channel
		 * information in subsequent operations.
		 */
		return (u32)(&chan_tab_ptr[chan]);
	}

	/* No channel (or no memory): release the claimed devices. */
	stp->dev_flags &= ~DEV_FLAGS_INUSE;
	dtp->dev_flags &= ~DEV_FLAGS_INUSE;

	return 0;
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
0360 
0361 /*
0362  * Set the device width if source or destination is a FIFO.
0363  * Should be 8, 16, or 32 bits.
0364  */
0365 u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
0366 {
0367     u32     rv;
0368     chan_tab_t  *ctp;
0369     dbdev_tab_t *stp, *dtp;
0370 
0371     ctp = *((chan_tab_t **)chanid);
0372     stp = ctp->chan_src;
0373     dtp = ctp->chan_dest;
0374     rv = 0;
0375 
0376     if (stp->dev_flags & DEV_FLAGS_IN) {    /* Source in fifo */
0377         rv = stp->dev_devwidth;
0378         stp->dev_devwidth = bits;
0379     }
0380     if (dtp->dev_flags & DEV_FLAGS_OUT) {   /* Destination out fifo */
0381         rv = dtp->dev_devwidth;
0382         dtp->dev_devwidth = bits;
0383     }
0384 
0385     return rv;
0386 }
0387 EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
0388 
/*
 * Allocate a descriptor ring, initializing as much as possible.
 *
 * Builds a circular ring of 'entries' descriptors, each 32-byte
 * aligned, and pre-fills every descriptor with command/source/dest
 * templates derived from the channel's two endpoint devices.  Returns
 * the virtual address of the first descriptor (also stored in
 * ctp->chan_desc_base), or 0 on allocation failure.
 */
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
	int         i;
	u32         desc_base, srcid, destid;
	u32         cmd0, cmd1, src1, dest1;
	u32         src0, dest0;
	chan_tab_t      *ctp;
	dbdev_tab_t     *stp, *dtp;
	au1x_ddma_desc_t    *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	/*
	 * The descriptors must be 32-byte aligned.  There is a
	 * possibility the allocation will give us such an address,
	 * and if we try that first we are likely to not waste larger
	 * slabs of memory.
	 */
	desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
				       GFP_KERNEL|GFP_DMA);
	if (desc_base == 0)
		return 0;

	if (desc_base & 0x1f) {
		/*
		 * Lost....do it again, allocate extra, and round
		 * the address base.
		 */
		kfree((const void *)desc_base);
		i = entries * sizeof(au1x_ddma_desc_t);
		i += (sizeof(au1x_ddma_desc_t) - 1);
		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
		if (desc_base == 0)
			return 0;

		/* cdb_membase keeps the unaligned address for kfree() later. */
		ctp->cdb_membase = desc_base;
		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
	} else
		ctp->cdb_membase = desc_base;

	dp = (au1x_ddma_desc_t *)desc_base;

	/* Keep track of the base descriptor. */
	ctp->chan_desc_base = dp;

	/* Initialize the rings with as much information as we know. */
	srcid = stp->dev_id;
	destid = dtp->dev_id;

	cmd0 = cmd1 = src1 = dest1 = 0;
	src0 = dest0 = 0;

	cmd0 |= DSCR_CMD0_SID(srcid);
	cmd0 |= DSCR_CMD0_DID(destid);
	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);

	/* Is it mem to mem transfer? (both ends are pseudo-devices) */
	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
		cmd0 |= DSCR_CMD0_MEM;

	/* Source device width; unrecognized values default to 32 bits. */
	switch (stp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
		break;
	}

	/* Destination device width; unrecognized values default to 32 bits. */
	switch (dtp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
		break;
	}

	/*
	 * If the device is marked as an in/out FIFO, ensure it is
	 * set non-coherent.
	 */
	if (stp->dev_flags & DEV_FLAGS_IN)
		cmd0 |= DSCR_CMD0_SN;		/* Source in FIFO */
	if (dtp->dev_flags & DEV_FLAGS_OUT)
		cmd0 |= DSCR_CMD0_DN;		/* Destination out FIFO */

	/*
	 * Set up source1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (stp->dev_tsize) {
	case 1:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
		break;
	case 2:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
		break;
	case 4:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
		break;
	}

	/* If source input is FIFO, set static address. */
	if (stp->dev_flags & DEV_FLAGS_IN) {
		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
		else
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
	}

	if (stp->dev_physaddr)
		src0 = stp->dev_physaddr;

	/*
	 * Set up dest1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (dtp->dev_tsize) {
	case 1:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
		break;
	case 2:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
		break;
	case 4:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
		break;
	}

	/* If destination output is FIFO, set static address. */
	if (dtp->dev_flags & DEV_FLAGS_OUT) {
		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
		else
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
	}

	if (dtp->dev_physaddr)
		dest0 = dtp->dev_physaddr;

#if 0
		printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
				  "source1:%x dest0:%x dest1:%x\n",
				  dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
				  src1, dest0, dest1);
#endif
	/* Stamp the template into every descriptor and chain them. */
	for (i = 0; i < entries; i++) {
		dp->dscr_cmd0 = cmd0;
		dp->dscr_cmd1 = cmd1;
		dp->dscr_source0 = src0;
		dp->dscr_source1 = src1;
		dp->dscr_dest0 = dest0;
		dp->dscr_dest1 = dest1;
		dp->dscr_stat = 0;
		dp->sw_context = 0;
		dp->sw_status = 0;
		/* Hardware next-pointer is a physical address. */
		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
		dp++;
	}

	/* Make last descriptor point to the first. */
	dp--;
	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	return (u32)ctp->chan_desc_base;
}
EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
0585 
/*
 * Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count.  Normally used
 * for memory to fifo transfers.
 *
 * Returns nbytes on success, or 0 if the descriptor at put_ptr is
 * still owned by the hardware (its valid bit is set).
 */
u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t      *ctp;
	au1x_ddma_desc_t    *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *(chan_tab_t **)chanid;

	/*
	 * We should have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.  (The interrupt handler only
	 * advances cur_ptr, never put_ptr.)
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count. */
	dp->dscr_source0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
	/* Check flags: per-descriptor interrupt enable/disable override. */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an errata on the Au1200/Au1550 parts that could result
	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
	 * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
	 * these parts. If it is fixed in the future, these dma_cache_inv will
	 * just be nothing more than empty macros. See io.h.
	 */
	dma_cache_wback_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;	/* ring channel doorbell */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_source);
0645 
/* Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count.  Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 *
 * Returns nbytes on success, or 0 if the descriptor at put_ptr is
 * still owned by the hardware (its valid bit is set).
 */
u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t      *ctp;
	au1x_ddma_desc_t    *dp;

	/* I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/* We should have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.  (The interrupt handler only
	 * advances cur_ptr, never put_ptr.)
	 */
	dp = ctp->put_ptr;

	/* If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count */

	/* Check flags: per-descriptor interrupt enable/disable override. */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	dp->dscr_dest0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
#if 0
	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
		      dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
		      dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an errata on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
	 * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
	 * parts. If it is fixed in the future, these dma_cache_inv will just
	 * be nothing more than empty macros. See io.h.
	 */
	dma_cache_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;	/* ring channel doorbell */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
0707 
/*
 * Get a destination buffer into the DMA ring.
 * Normally used to get a full buffer from the ring during fifo
 * to memory transfers.  This does not set the valid bit, you will
 * have to put another destination buffer to keep the DMA going.
 *
 * Returns the descriptor's status word (dscr_stat) and fills in *buf
 * and *nbytes; returns 0 without touching the outputs if the
 * descriptor at get_ptr is still owned by the hardware.
 */
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
	chan_tab_t      *ctp;
	au1x_ddma_desc_t    *dp;
	u32         rv;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We should have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.  (The interrupt handler only
	 * advances cur_ptr, never get_ptr.)
	 */
	dp = ctp->get_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Return buffer address and byte count. */
	*buf = (void *)(phys_to_virt(dp->dscr_dest0));
	*nbytes = dp->dscr_cmd1;
	rv = dp->dscr_stat;

	/* Get next descriptor pointer. */
	ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
0752 
0753 void au1xxx_dbdma_stop(u32 chanid)
0754 {
0755     chan_tab_t  *ctp;
0756     au1x_dma_chan_t *cp;
0757     int halt_timeout = 0;
0758 
0759     ctp = *((chan_tab_t **)chanid);
0760 
0761     cp = ctp->chan_ptr;
0762     cp->ddma_cfg &= ~DDMA_CFG_EN;   /* Disable channel */
0763     wmb(); /* drain writebuffer */
0764     while (!(cp->ddma_stat & DDMA_STAT_H)) {
0765         udelay(1);
0766         halt_timeout++;
0767         if (halt_timeout > 100) {
0768             printk(KERN_WARNING "warning: DMA channel won't halt\n");
0769             break;
0770         }
0771     }
0772     /* clear current desc valid and doorbell */
0773     cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
0774     wmb(); /* drain writebuffer */
0775 }
0776 EXPORT_SYMBOL(au1xxx_dbdma_stop);
0777 
/*
 * Start using the current descriptor pointer.  If the DBDMA encounters
 * a non-valid descriptor, it will stop.  In this case, we can just
 * continue by adding a buffer to the list and starting again.
 */
void au1xxx_dbdma_start(u32 chanid)
{
	chan_tab_t  *ctp;
	au1x_dma_chan_t *cp;

	ctp = *((chan_tab_t **)chanid);
	cp = ctp->chan_ptr;
	/* Point the hardware at the current descriptor (physical address). */
	cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
	cp->ddma_cfg |= DDMA_CFG_EN;	/* Enable channel */
	wmb(); /* drain writebuffer */
	/* Write the doorbell register to kick off processing. */
	cp->ddma_dbell = 0;
	wmb(); /* drain writebuffer */
}
EXPORT_SYMBOL(au1xxx_dbdma_start);
0797 
/*
 * Stop the channel and reset its ring: rewind get/put/cur pointers to
 * the base descriptor and clear every descriptor's valid bit and
 * software status.
 */
void au1xxx_dbdma_reset(u32 chanid)
{
	chan_tab_t      *ctp;
	au1x_ddma_desc_t    *dp;

	au1xxx_dbdma_stop(chanid);

	ctp = *((chan_tab_t **)chanid);
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	/* Run through the descriptors and reset the valid indicator. */
	dp = ctp->chan_desc_base;

	do {
		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
		/*
		 * Reset our software status -- this is used to determine
		 * if a descriptor is in use by upper level software. Since
		 * posting can reset 'V' bit.
		 */
		dp->sw_status = 0;
		/* Follow the hardware chain (ring is circular, see ring_alloc). */
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}
EXPORT_SYMBOL(au1xxx_dbdma_reset);
0823 
0824 u32 au1xxx_get_dma_residue(u32 chanid)
0825 {
0826     chan_tab_t  *ctp;
0827     au1x_dma_chan_t *cp;
0828     u32     rv;
0829 
0830     ctp = *((chan_tab_t **)chanid);
0831     cp = ctp->chan_ptr;
0832 
0833     /* This is only valid if the channel is stopped. */
0834     rv = cp->ddma_bytecnt;
0835     wmb(); /* drain writebuffer */
0836 
0837     return rv;
0838 }
0839 EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
0840 
0841 void au1xxx_dbdma_chan_free(u32 chanid)
0842 {
0843     chan_tab_t  *ctp;
0844     dbdev_tab_t *stp, *dtp;
0845 
0846     ctp = *((chan_tab_t **)chanid);
0847     stp = ctp->chan_src;
0848     dtp = ctp->chan_dest;
0849 
0850     au1xxx_dbdma_stop(chanid);
0851 
0852     kfree((void *)ctp->cdb_membase);
0853 
0854     stp->dev_flags &= ~DEV_FLAGS_INUSE;
0855     dtp->dev_flags &= ~DEV_FLAGS_INUSE;
0856     chan_tab_ptr[ctp->chan_index] = NULL;
0857 
0858     kfree(ctp);
0859 }
0860 EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
0861 
/*
 * Shared DBDMA interrupt handler.  Services the lowest-numbered pending
 * channel: acknowledges the channel interrupt, invokes the registered
 * callback (if any), then advances the channel's current descriptor
 * pointer to the next ring entry.
 */
static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
{
    u32 intstat;
    u32 chan_index;
    chan_tab_t      *ctp;
    au1x_ddma_desc_t    *dp;
    au1x_dma_chan_t *cp;

    /* Lowest set bit selects which channel gets serviced this pass;
     * other pending channels will re-raise the interrupt.
     */
    intstat = dbdma_gptr->ddma_intstat;
    wmb(); /* drain writebuffer */
    chan_index = __ffs(intstat);

    /* NOTE(review): no guard for intstat == 0 or an unallocated
     * chan_tab_ptr slot -- this assumes the hardware only raises the
     * IRQ for channels that were set up through this driver; confirm.
     */
    ctp = chan_tab_ptr[chan_index];
    cp = ctp->chan_ptr;
    dp = ctp->cur_ptr;

    /* Reset interrupt. */
    cp->ddma_irq = 0;
    wmb(); /* drain writebuffer */

    /* Let the channel owner process the completed descriptor. */
    if (ctp->chan_callback)
        ctp->chan_callback(irq, ctp->chan_callparam);

    /* Advance to the next descriptor in the ring. */
    ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
    return IRQ_RETVAL(1);
}
0888 
0889 void au1xxx_dbdma_dump(u32 chanid)
0890 {
0891     chan_tab_t   *ctp;
0892     au1x_ddma_desc_t *dp;
0893     dbdev_tab_t  *stp, *dtp;
0894     au1x_dma_chan_t  *cp;
0895     u32 i        = 0;
0896 
0897     ctp = *((chan_tab_t **)chanid);
0898     stp = ctp->chan_src;
0899     dtp = ctp->chan_dest;
0900     cp = ctp->chan_ptr;
0901 
0902     printk(KERN_DEBUG "Chan %x, stp %x (dev %d)  dtp %x (dev %d)\n",
0903               (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
0904               dtp - dbdev_tab);
0905     printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
0906               (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
0907               (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
0908 
0909     printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
0910     printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
0911               cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
0912     printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
0913               cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
0914               cp->ddma_bytecnt);
0915 
0916     /* Run through the descriptors */
0917     dp = ctp->chan_desc_base;
0918 
0919     do {
0920         printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
0921                   i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
0922         printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
0923                   dp->dscr_source0, dp->dscr_source1,
0924                   dp->dscr_dest0, dp->dscr_dest1);
0925         printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
0926                   dp->dscr_stat, dp->dscr_nxtptr);
0927         dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
0928     } while (dp != ctp->chan_desc_base);
0929 }
0930 
0931 /* Put a descriptor into the DMA ring.
0932  * This updates the source/destination pointers and byte count.
0933  */
0934 u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
0935 {
0936     chan_tab_t *ctp;
0937     au1x_ddma_desc_t *dp;
0938     u32 nbytes = 0;
0939 
0940     /*
0941      * I guess we could check this to be within the
0942      * range of the table......
0943      */
0944     ctp = *((chan_tab_t **)chanid);
0945 
0946     /*
0947      * We should have multiple callers for a particular channel,
0948      * an interrupt doesn't affect this pointer nor the descriptor,
0949      * so no locking should be needed.
0950      */
0951     dp = ctp->put_ptr;
0952 
0953     /*
0954      * If the descriptor is valid, we are way ahead of the DMA
0955      * engine, so just return an error condition.
0956      */
0957     if (dp->dscr_cmd0 & DSCR_CMD0_V)
0958         return 0;
0959 
0960     /* Load up buffer addresses and byte count. */
0961     dp->dscr_dest0 = dscr->dscr_dest0;
0962     dp->dscr_source0 = dscr->dscr_source0;
0963     dp->dscr_dest1 = dscr->dscr_dest1;
0964     dp->dscr_source1 = dscr->dscr_source1;
0965     dp->dscr_cmd1 = dscr->dscr_cmd1;
0966     nbytes = dscr->dscr_cmd1;
0967     /* Allow the caller to specify if an interrupt is generated */
0968     dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
0969     dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
0970     ctp->chan_ptr->ddma_dbell = 0;
0971 
0972     /* Get next descriptor pointer. */
0973     ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
0974 
0975     /* Return something non-zero. */
0976     return nbytes;
0977 }
0978 
0979 
/*
 * Register state saved across suspend/resume: row 0 holds the global
 * DBDMA configuration registers, rows 1..NUM_DBDMA_CHANS hold up to
 * six registers per channel.
 */
static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
0981 
/*
 * Syscore suspend hook: save the global DBDMA configuration and every
 * channel's registers, halt all channels, and mask channel interrupts
 * so nothing fires during the power transition.  Always returns 0.
 */
static int alchemy_dbdma_suspend(void)
{
    int i;
    void __iomem *addr;

    /* Save the four global configuration registers. */
    addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
    alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
    alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
    alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
    alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);

    /* save channel configurations */
    addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
    for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
        /* NOTE(review): offsets presumably mirror au1x_dma_chan_t
         * field order (cfg, desptr, statptr, dbell, irq, stat) --
         * confirm against au1xxx_dbdma.h.
         */
        alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
        alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
        alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
        alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
        alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
        alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);

        /* halt channel */
        __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
        wmb();
        /* Spin until the engine reports halted.  NOTE(review): no
         * timeout -- a wedged channel would hang suspend here.
         */
        while (!(__raw_readl(addr + 0x14) & 1))
            wmb();

        addr += 0x100;  /* next channel base */
    }
    /* disable channel interrupts */
    addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
    __raw_writel(0, addr + 0x0c);
    wmb();

    return 0;
}
1018 
1019 static void alchemy_dbdma_resume(void)
1020 {
1021     int i;
1022     void __iomem *addr;
1023 
1024     addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
1025     __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
1026     __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
1027     __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
1028     __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);
1029 
1030     /* restore channel configurations */
1031     addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
1032     for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
1033         __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
1034         __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
1035         __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
1036         __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
1037         __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
1038         __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
1039         wmb();
1040         addr += 0x100;  /* next channel base */
1041     }
1042 }
1043 
/* Hook the DBDMA state save/restore into system suspend/resume. */
static struct syscore_ops alchemy_dbdma_syscore_ops = {
    .suspend    = alchemy_dbdma_suspend,
    .resume     = alchemy_dbdma_resume,
};
1048 
1049 static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
1050 {
1051     int ret;
1052 
1053     dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
1054     if (!dbdev_tab)
1055         return -ENOMEM;
1056 
1057     memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t));
1058     for (ret = 32; ret < DBDEV_TAB_SIZE; ret++)
1059         dbdev_tab[ret].dev_id = ~0;
1060 
1061     dbdma_gptr->ddma_config = 0;
1062     dbdma_gptr->ddma_throttle = 0;
1063     dbdma_gptr->ddma_inten = 0xffff;
1064     wmb(); /* drain writebuffer */
1065 
1066     ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr);
1067     if (ret)
1068         printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
1069     else {
1070         dbdma_initialized = 1;
1071         register_syscore_ops(&alchemy_dbdma_syscore_ops);
1072     }
1073 
1074     return ret;
1075 }
1076 
1077 static int __init alchemy_dbdma_init(void)
1078 {
1079     switch (alchemy_get_cputype()) {
1080     case ALCHEMY_CPU_AU1550:
1081         return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab);
1082     case ALCHEMY_CPU_AU1200:
1083         return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab);
1084     case ALCHEMY_CPU_AU1300:
1085         return dbdma_setup(AU1300_DDMA_INT, au1300_dbdev_tab);
1086     }
1087     return 0;
1088 }
1089 subsys_initcall(alchemy_dbdma_init);