0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #ifndef __T7XX_HIF_CLDMA_H__
0019 #define __T7XX_HIF_CLDMA_H__
0020
0021 #include <linux/bits.h>
0022 #include <linux/device.h>
0023 #include <linux/dmapool.h>
0024 #include <linux/pci.h>
0025 #include <linux/skbuff.h>
0026 #include <linux/spinlock.h>
0027 #include <linux/wait.h>
0028 #include <linux/workqueue.h>
0029 #include <linux/types.h>
0030
0031 #include "t7xx_cldma.h"
0032 #include "t7xx_pci.h"
0033
0034
0035
0036
0037
0038
0039
/* Identifies which CLDMA hardware instance a controller drives. */
enum cldma_id {
	CLDMA_ID_MD,	/* modem (MD) CLDMA instance */
	CLDMA_ID_AP,	/* application-processor (AP) CLDMA instance */
	CLDMA_NUM	/* number of instances; not itself a valid ID */
};
0045
/**
 * struct cldma_gpd - General Purpose Descriptor shared with the CLDMA engine.
 * @flags: GPD_FLAGS_* control bits (see definitions below).
 * @not_used1: reserved/padding byte.
 * @rx_data_allow_len: maximum number of bytes the HW may write into the RX
 *                     buffer — NOTE(review): inferred from the name; confirm
 *                     against the hardware programming guide.
 * @next_gpd_ptr_h: high 32 bits of the DMA address of the next GPD.
 * @next_gpd_ptr_l: low 32 bits of the DMA address of the next GPD.
 * @data_buff_bd_ptr_h: high 32 bits of the data buffer DMA address.
 * @data_buff_bd_ptr_l: low 32 bits of the data buffer DMA address.
 * @data_buff_len: length in bytes of the data in the buffer.
 * @not_used2: reserved/padding.
 *
 * Layout matches the hardware descriptor format: multi-byte fields are
 * little-endian (__le16/__le32) and 64-bit DMA addresses are split into
 * high/low 32-bit halves.  Do not reorder or resize fields.
 */
struct cldma_gpd {
	u8 flags;
	u8 not_used1;
	__le16 rx_data_allow_len;
	__le32 next_gpd_ptr_h;
	__le32 next_gpd_ptr_l;
	__le32 data_buff_bd_ptr_h;
	__le32 data_buff_bd_ptr_l;
	__le16 data_buff_len;
	__le16 not_used2;
};
0057
/**
 * struct cldma_request - Driver-side bookkeeping for one GPD slot.
 * @gpd: CPU (virtual) pointer to the descriptor, allocated from gpd_dmapool.
 * @gpd_addr: DMA address of @gpd, as programmed into the hardware chain.
 * @skb: socket buffer currently attached to this descriptor, if any.
 * @mapped_buff: DMA mapping of the skb data buffer.
 * @entry: node in the owning ring's gpd_ring list.
 */
struct cldma_request {
	struct cldma_gpd *gpd;
	dma_addr_t gpd_addr;
	struct sk_buff *skb;
	dma_addr_t mapped_buff;
	struct list_head entry;
};
0065
/**
 * struct cldma_ring - A circular chain of GPD requests for one queue.
 * @gpd_ring: list of struct cldma_request entries forming the ring.
 * @length: number of requests in the ring.
 * @pkt_size: buffer size allocated per packet/request, in bytes.
 */
struct cldma_ring {
	struct list_head gpd_ring;
	unsigned int length;
	int pkt_size;
};
0071
/**
 * struct cldma_queue - Per-direction hardware queue state.
 * @md_ctrl: back-pointer to the owning controller.
 * @dir: queue direction (TX or RX).
 * @index: queue number within the controller's txq[]/rxq[] array.
 * @tr_ring: descriptor ring serving this queue.
 * @tr_done: next request expected to be completed by the hardware —
 *           NOTE(review): inferred from the name; verify against the .c file.
 * @rx_refill: next RX request to be refilled with a fresh buffer (RX only).
 * @tx_next: next free TX request to fill (TX only).
 * @budget: remaining descriptor budget — presumably for TX flow control;
 *          confirm against the queue implementation.
 * @ring_lock: spinlock protecting the ring and the cursors above.
 * @req_wq: waitqueue for callers blocked waiting for a free request.
 * @worker: dedicated workqueue servicing this queue.
 * @cldma_work: work item queued on @worker to process completions.
 */
struct cldma_queue {
	struct cldma_ctrl *md_ctrl;
	enum mtk_txrx dir;
	unsigned int index;
	struct cldma_ring *tr_ring;
	struct cldma_request *tr_done;
	struct cldma_request *rx_refill;
	struct cldma_request *tx_next;
	int budget;
	spinlock_t ring_lock;
	wait_queue_head_t req_wq;
	struct workqueue_struct *worker;
	struct work_struct cldma_work;
};
0086
/**
 * struct cldma_ctrl - Top-level state for one CLDMA hardware instance.
 * @hif_id: which instance this controller drives (MD or AP).
 * @dev: device used for DMA mapping and logging.
 * @t7xx_dev: owning PCI device context.
 * @txq: per-queue TX state, one entry per hardware TX queue.
 * @rxq: per-queue RX state, one entry per hardware RX queue.
 * @txq_active: active-TX-queue state — presumably a bitmask, one bit per
 *              queue; confirm against the .c file.
 * @rxq_active: active-RX-queue state (same presumed encoding as @txq_active).
 * @txq_started: started-TX-queue state (same presumed encoding).
 * @cldma_lock: spinlock protecting controller-wide state.
 * @gpd_dmapool: DMA pool from which GPDs are allocated.
 * @tx_ring: descriptor rings backing the TX queues.
 * @rx_ring: descriptor rings backing the RX queues.
 * @pm_entity: power-management registration for this controller.
 * @hw_info: register layout/handles for the underlying CLDMA hardware.
 * @is_late_init: true once late (post-handshake) initialization has run —
 *                NOTE(review): inferred from the name; verify in the .c file.
 * @recv_skb: callback invoked to deliver a received skb upward.
 */
struct cldma_ctrl {
	enum cldma_id hif_id;
	struct device *dev;
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_queue txq[CLDMA_TXQ_NUM];
	struct cldma_queue rxq[CLDMA_RXQ_NUM];
	unsigned short txq_active;
	unsigned short rxq_active;
	unsigned short txq_started;
	spinlock_t cldma_lock;

	struct dma_pool *gpd_dmapool;
	struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
	struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
	struct md_pm_entity *pm_entity;
	struct t7xx_cldma_hw hw_info;
	bool is_late_init;
	int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
0106
/* Bits of cldma_gpd.flags */
#define GPD_FLAGS_HWO		BIT(0)	/* HW owns the descriptor — presumably
					 * "hardware own"; confirm vs. HW docs.
					 */
#define GPD_FLAGS_IOC		BIT(7)	/* interrupt on completion — inferred
					 * from the name; confirm vs. HW docs.
					 */
#define GPD_DMAPOOL_ALIGN	16	/* alignment of GPDs in gpd_dmapool */

#define CLDMA_MTU		3584	/* per-packet buffer size in bytes */
0112
/*
 * Public CLDMA HIF API.  Typical lifecycle: alloc -> init -> hif_hw_init ->
 * start ... stop -> exit; reset/switch_cfg reconfigure an existing controller.
 * (Ordering inferred from the names — confirm against the .c implementation.)
 */
/* Allocate controller state for one CLDMA instance. */
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
/* Program the CLDMA hardware registers for this controller. */
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
/* Initialize software state (rings, queues, locks). */
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
/* Tear down the controller and release its resources. */
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
/* Switch the controller to an alternate ring configuration. */
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
/* Start all queues / enable the hardware. */
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
/* Stop the hardware; returns 0 on success or a negative errno. */
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
/* Reset the controller back to a clean state. */
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
/* Register the callback used to deliver received skbs upward. */
void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
/* Queue an skb for transmission on TX queue @qno; negative errno on failure. */
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
/* Stop all queues in the given direction. */
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
/* Clear (flush) all queues in the given direction. */
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
0126
0127 #endif