/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 */
#ifndef __DAVINCI_CPDMA_H__
#define __DAVINCI_CPDMA_H__

#define CPDMA_MAX_CHANNELS	BITS_PER_LONG

#define CPDMA_RX_SOURCE_PORT(__status__)	((__status__ >> 16) & 0x7)

#define CPDMA_RX_VLAN_ENCAP	BIT(19)

#define CPDMA_EOI_RX_THRESH	0x0
#define CPDMA_EOI_RX		0x1
#define CPDMA_EOI_TX		0x2
#define CPDMA_EOI_MISC		0x3

struct cpdma_params {
	struct device		*dev;
	void __iomem		*dmaregs;
	void __iomem		*txhdp, *rxhdp, *txcp, *rxcp;
	void __iomem		*rxthresh, *rxfree;
	int			num_chan;
	bool			has_soft_reset;
	int			min_packet_size;
	dma_addr_t		desc_mem_phys;
	dma_addr_t		desc_hw_addr;
	int			desc_mem_size;
	int			desc_align;
	u32			bus_freq_mhz;
	u32			descs_pool_size;

	/*
	 * Some instances of embedded cpdma controllers have extra control and
	 * status registers.  The following flag enables access to these
	 * registers.
	 */
	bool			has_ext_regs;
};
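
/*
 * Usage sketch (illustrative only): a MAC driver fills in cpdma_params from
 * its own register mapping and descriptor memory before handing the structure
 * to cpdma_ctlr_create().  Every offset and size below is a hypothetical
 * placeholder, as are 'dev', 'ss_regs' and 'desc_phys'; real values come from
 * the wrapping CPSW/EMAC hardware and its device tree data.
 *
 *	struct cpdma_params dma_params = {
 *		.dev			= dev,
 *		.dmaregs		= ss_regs + 0x800,	// hypothetical offsets
 *		.txhdp			= ss_regs + 0xa00,
 *		.rxhdp			= ss_regs + 0xa20,
 *		.txcp			= ss_regs + 0xa40,
 *		.rxcp			= ss_regs + 0xa60,
 *		.num_chan		= 8,
 *		.has_soft_reset		= true,
 *		.min_packet_size	= 64,
 *		.desc_mem_phys		= desc_phys,		// descriptor RAM
 *		.desc_hw_addr		= desc_phys,
 *		.desc_mem_size		= SZ_8K,
 *		.desc_align		= 16,
 *		.bus_freq_mhz		= 250,
 *	};
 */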

struct cpdma_chan_stats {
	u32			head_enqueue;
	u32			tail_enqueue;
	u32			pad_enqueue;
	u32			misqueued;
	u32			desc_alloc_fail;
	u32			pad_alloc_fail;
	u32			runt_receive_buff;
	u32			runt_transmit_buff;
	u32			empty_dequeue;
	u32			busy_dequeue;
	u32			good_dequeue;
	u32			requeue;
	u32			teardown_dequeue;
};

struct cpdma_ctlr;
struct cpdma_chan;

typedef void (*cpdma_handler_fn)(void *token, int len, int status);
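
/*
 * Completion handler sketch (illustrative, not part of this driver): the
 * handler registered with cpdma_chan_create() receives the token passed at
 * submit time, the completed length, and a status word.  For RX channels the
 * status carries the ingress port and VLAN-encapsulation flag, which the
 * CPDMA_RX_SOURCE_PORT()/CPDMA_RX_VLAN_ENCAP helpers above decode.
 * my_rx_handler() and its body are hypothetical.
 *
 *	static void my_rx_handler(void *token, int len, int status)
 *	{
 *		struct sk_buff *skb = token;	// token chosen by the caller at submit
 *		int port = CPDMA_RX_SOURCE_PORT(status);
 *
 *		if (status < 0) {		// channel error or teardown
 *			dev_kfree_skb_any(skb);
 *			return;
 *		}
 *		// otherwise hand the skb (len bytes, received on 'port') up the stack
 *	}
 */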

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
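
/*
 * Controller lifecycle sketch (illustrative): create the controller from the
 * filled-in parameters, start it once channels exist, and stop/destroy it on
 * teardown.  'dma_params' is the hypothetical structure shown above; most
 * error handling is elided.
 *
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 *
 *	if (!dma)
 *		return -ENOMEM;
 *	cpdma_ctlr_start(dma);
 *	...
 *	cpdma_ctlr_stop(dma);
 *	cpdma_ctlr_destroy(dma);
 */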

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type);
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan);
int cpdma_chan_destroy(struct cpdma_chan *chan);
int cpdma_chan_start(struct cpdma_chan *chan);
int cpdma_chan_stop(struct cpdma_chan *chan);
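
/*
 * Channel setup sketch (illustrative): one TX and one RX channel are created
 * against the same controller, each with its own completion handler, and then
 * started.  rx_type marks the channel as receive (non-zero) or transmit
 * (zero); the channel number and the my_tx_handler()/my_rx_handler() names
 * are hypothetical, and error handling is elided.
 *
 *	struct cpdma_chan *txch, *rxch;
 *
 *	txch = cpdma_chan_create(dma, 0, my_tx_handler, 0);
 *	rxch = cpdma_chan_create(dma, 0, my_rx_handler, 1);
 *	cpdma_chan_start(txch);
 *	cpdma_chan_start(rxch);
 */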

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats);
int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
			     dma_addr_t data, int len, int directed);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed);
int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
				  dma_addr_t data, int len, int directed);
int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
			   int len, int directed);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
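
/*
 * Data path sketch (illustrative): the transmit path queues a buffer with the
 * skb as the completion token, and the NAPI poll routine reaps completed
 * descriptors, which invokes the handler registered at cpdma_chan_create()
 * time.  'skb', 'budget', the 'drop' label and the surrounding driver context
 * are hypothetical.
 *
 *	// hard_start_xmit path
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *	if (ret)
 *		goto drop;	// e.g. no free descriptors
 *
 *	// NAPI poll
 *	num_done = cpdma_chan_process(rxch, budget);
 */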

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr);
u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight);
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate);
u32 cpdma_chan_get_rate(struct cpdma_chan *ch);
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr);
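
/*
 * Interrupt handling sketch (illustrative): a typical RX hard-IRQ handler
 * acknowledges the interrupt with an EOI, masks further CPDMA interrupts and
 * defers descriptor processing to NAPI, where cpdma_chan_process() runs.
 * my_rx_interrupt(), 'struct my_priv' and its fields are hypothetical.
 *
 *	static irqreturn_t my_rx_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
 *		cpdma_ctlr_int_ctrl(priv->dma, false);
 *		napi_schedule(&priv->napi_rx);
 *		return IRQ_HANDLED;
 *	}
 */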

enum cpdma_control {
	CPDMA_TX_RLIM,
	CPDMA_CMD_IDLE,
	CPDMA_COPY_ERROR_FRAMES,
	CPDMA_RX_OFF_LEN_UPDATE,
	CPDMA_RX_OWNERSHIP_FLIP,
	CPDMA_TX_PRIO_FIXED,
	CPDMA_STAT_IDLE,
	CPDMA_STAT_TX_ERR_CHAN,
	CPDMA_STAT_TX_ERR_CODE,
	CPDMA_STAT_RX_ERR_CHAN,
	CPDMA_STAT_RX_ERR_CODE,
	CPDMA_RX_BUFFER_OFFSET,
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
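
/*
 * Controller knob sketch (illustrative): cpdma_control_set() and
 * cpdma_control_get() operate on the enum cpdma_control values above, for
 * example to make received data land at a small offset into the buffer or to
 * query the TX priority mode.  The offset value and the 'prio_fixed' variable
 * are hypothetical.
 *
 *	cpdma_control_set(dma, CPDMA_RX_BUFFER_OFFSET, 2);
 *	prio_fixed = cpdma_control_get(dma, CPDMA_TX_PRIO_FIXED);
 */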

#endif