
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
0016 
0017 #ifndef _BRCM_DMA_H_
0018 #define _BRCM_DMA_H_
0019 
0020 #include <linux/delay.h>
0021 #include <linux/skbuff.h>
0022 #include "types.h"      /* forward structure declarations */
0023 
0024 /* map/unmap direction */
0025 #define DMA_TX  1       /* TX direction for DMA */
0026 #define DMA_RX  2       /* RX direction for DMA */
0027 
0028 /* DMA structure:
0029  *  support two DMA engines: 32 bits address or 64 bit addressing
0030  *  basic DMA register set is per channel(transmit or receive)
0031  *  a pair of channels is defined for convenience
0032  */
0033 
0034 /* 32 bits addressing */
0035 
0036 struct dma32diag {  /* diag access */
0037     u32 fifoaddr;   /* diag address */
0038     u32 fifodatalow;    /* low 32bits of data */
0039     u32 fifodatahigh;   /* high 32bits of data */
0040     u32 pad;        /* reserved */
0041 };
0042 
0043 /* 64 bits addressing */
0044 
0045 /* dma registers per channel(xmt or rcv) */
0046 struct dma64regs {
0047     u32 control;    /* enable, et al */
0048     u32 ptr;    /* last descriptor posted to chip */
0049     u32 addrlow;    /* desc ring base address low 32-bits (8K aligned) */
0050     u32 addrhigh;   /* desc ring base address bits 63:32 (8K aligned) */
0051     u32 status0;    /* current descriptor, xmt state */
0052     u32 status1;    /* active descriptor, xmt error */
0053 };
0054 
/* range param for dma_getnexttxp() and dma_txreclaim */
enum txd_range {
	DMA_RANGE_ALL = 1,	/* reclaim every posted descriptor */
	DMA_RANGE_TRANSMITTED,	/* only descriptors the chip has transmitted */
	DMA_RANGE_TRANSFERED	/* only descriptors fully transferred (sic) */
};
0061 
0062 /*
0063  * Exported data structure (read-only)
0064  */
0065 /* export structure */
0066 struct dma_pub {
0067     uint txavail;       /* # free tx descriptors */
0068     uint dmactrlflags;  /* dma control flags */
0069 
0070     /* rx error counters */
0071     uint rxgiants;      /* rx giant frames */
0072     uint rxnobuf;       /* rx out of dma descriptors */
0073     /* tx error counters */
0074     uint txnobuf;       /* tx out of dma descriptors */
0075 };
0076 
0077 extern struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
0078                   uint txregbase, uint rxregbase,
0079                   uint ntxd, uint nrxd,
0080                   uint rxbufsize, int rxextheadroom,
0081                   uint nrxpost, uint rxoffset);
0082 
0083 void dma_rxinit(struct dma_pub *pub);
0084 int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
0085 bool dma_rxfill(struct dma_pub *pub);
0086 bool dma_rxreset(struct dma_pub *pub);
0087 bool dma_txreset(struct dma_pub *pub);
0088 void dma_txinit(struct dma_pub *pub);
0089 int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
0090            struct sk_buff *p0);
0091 void dma_txflush(struct dma_pub *pub);
0092 int dma_txpending(struct dma_pub *pub);
0093 void dma_kick_tx(struct dma_pub *pub);
0094 void dma_txsuspend(struct dma_pub *pub);
0095 bool dma_txsuspended(struct dma_pub *pub);
0096 void dma_txresume(struct dma_pub *pub);
0097 void dma_txreclaim(struct dma_pub *pub, enum txd_range range);
0098 void dma_rxreclaim(struct dma_pub *pub);
0099 void dma_detach(struct dma_pub *pub);
0100 unsigned long dma_getvar(struct dma_pub *pub, const char *name);
0101 struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
0102 void dma_counterreset(struct dma_pub *pub);
0103 
0104 void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
0105               (void *pkt, void *arg_a), void *arg_a);
0106 
0107 /*
0108  * DMA(Bug) on bcm47xx chips seems to declare that the packet is ready, but
0109  * the packet length is not updated yet (by DMA) on the expected time.
0110  * Workaround is to hold processor till DMA updates the length, and stay off
0111  * the bus to allow DMA update the length in buffer
0112  */
0113 static inline void dma_spin_for_len(uint len, struct sk_buff *head)
0114 {
0115 #if defined(CONFIG_BCM47XX)
0116     if (!len) {
0117         while (!(len = *(u16 *) KSEG1ADDR(head->data)))
0118             udelay(1);
0119 
0120         *(u16 *) (head->data) = cpu_to_le16((u16) len);
0121     }
0122 #endif              /* defined(CONFIG_BCM47XX) */
0123 }
0124 
0125 #endif              /* _BRCM_DMA_H_ */