#ifndef QCOM_HIDMA_H
#define QCOM_HIDMA_H

#include <linux/kfifo.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>

#define HIDMA_TRE_SIZE 32
#define HIDMA_TRE_CFG_IDX 0
#define HIDMA_TRE_LEN_IDX 1
#define HIDMA_TRE_SRC_LOW_IDX 2
#define HIDMA_TRE_SRC_HI_IDX 3
#define HIDMA_TRE_DEST_LOW_IDX 4
#define HIDMA_TRE_DEST_HI_IDX 5

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
	HIDMA_TRE_MEMSET = 4,
};

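/*
 * Software bookkeeping for one entry of the transfer ring (TRE): allocation
 * and queued state, the requester's callback and data, a local copy of the
 * TRE words written into the hardware ring (tre_local), and the error and
 * completion information reported back for this transfer.
 */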
struct hidma_tre {
	atomic_t allocated;
	bool queued;
	u16 status;
	u32 idx;
	u32 dma_sig;
	const char *dev_name;
	void (*callback)(void *data);
	void *data;
	struct hidma_lldev *lldev;
	u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1];
	u32 tre_index;
	u32 int_flags;
	u8 err_info;
	u8 err_code;
};

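/*
 * Lower-layer, hardware-facing context for one channel: transfer/event
 * channel state, the pool of hidma_tre bookkeeping entries, the TRE and EVRE
 * rings shared with the hardware (tre_dma/evre_dma hold their bus addresses),
 * the processed/write offsets into those rings, and the tasklet plus kfifo
 * used to hand completed entries back for notification.
 */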
struct hidma_lldev {
	bool msi_support;
	bool initialized;
	u8 trch_state;
	u8 evch_state;
	u8 chidx;
	u32 nr_tres;
	spinlock_t lock;
	struct hidma_tre *trepool;
	struct device *dev;
	void __iomem *trca;
	void __iomem *evca;
	struct hidma_tre **pending_tre_list;
	atomic_t pending_tre_count;

	void *tre_ring;
	dma_addr_t tre_dma;
	u32 tre_ring_size;
	u32 tre_processed_off;

	void *evre_ring;
	dma_addr_t evre_dma;
	u32 evre_ring_size;
	u32 evre_processed_off;

	u32 tre_write_offset;
	struct tasklet_struct task;
	DECLARE_KFIFO_PTR(handoff_fifo, struct hidma_tre *);
};

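/*
 * dmaengine descriptor wrapper: ties a dma_async_tx_descriptor to the
 * low-level TRE slot (tre_ch) that carries the transfer and links the
 * descriptor onto the per-channel lists in struct hidma_chan.
 */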
struct hidma_desc {
	struct dma_async_tx_descriptor desc;

	struct list_head node;
	u32 tre_ch;
};

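/*
 * Per-channel state for the dmaengine layer. Descriptors move through the
 * free, prepared, queued, active and completed lists over their lifetime;
 * "running" points at the descriptor currently being serviced and "lock"
 * protects this structure.
 */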
struct hidma_chan {
	bool paused;
	bool allocated;
	char dbg_name[16];
	u32 dma_sig;
	dma_cookie_t last_success;

	struct hidma_dev *dmadev;
	struct hidma_desc *running;

	struct dma_chan chan;
	struct list_head free;
	struct list_head prepared;
	struct list_head queued;
	struct list_head active;
	struct list_head completed;

	spinlock_t lock;
};

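/*
 * Upper-layer device instance registered with the dmaengine core (ddev).
 * Owns the lower-layer context (lldev), the mapped transfer/event channel
 * registers and their resources, the debugfs entry, the sysfs attribute
 * exposing the channel id (chid_attrs), and a tasklet for deferred work.
 */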
struct hidma_dev {
	int irq;
	int chidx;
	u32 nr_descriptors;
	int msi_virqbase;

	struct hidma_lldev *lldev;
	void __iomem *dev_trca;
	struct resource *trca_resource;
	void __iomem *dev_evca;
	struct resource *evca_resource;

	spinlock_t lock;
	struct dma_device ddev;

	struct dentry *debugfs;

	struct device_attribute *chid_attrs;

	struct tasklet_struct task;
};

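/*
 * Low-level channel interface (hidma_ll_*). The sequence below is an
 * illustrative sketch of how the calls fit together, inferred from the
 * prototypes in this header; variable names are made up for the example and
 * error handling is omitted. See the driver sources for the authoritative
 * flow.
 *
 *	struct hidma_lldev *ll;
 *	u32 tre_ch;
 *
 *	ll = hidma_ll_init(dev, nr_tres, trca, evca, chidx);
 *	hidma_ll_setup(ll);
 *	hidma_ll_enable(ll);
 *
 *	hidma_ll_request(ll, dev_id, "chan0", callback, data, &tre_ch);
 *	hidma_ll_set_transfer_params(ll, tre_ch, src, dest, len, flags,
 *				     HIDMA_TRE_MEMCPY);
 *	hidma_ll_queue_request(ll, tre_ch);
 *	hidma_ll_start(ll);
 *
 *	Completion is reported through hidma_ll_inthandler() (or the MSI
 *	variant), after which the slot can be released:
 *
 *	hidma_ll_free(ll, tre_ch);
 *	hidma_ll_disable(ll);
 *	hidma_ll_uninit(ll);
 */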
int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
		     const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch);

void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
			       u8 err_code);
void hidma_debug_init(struct hidma_dev *dmadev);
void hidma_debug_uninit(struct hidma_dev *dmadev);
#endif /* QCOM_HIDMA_H */