/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006, Intel Corporation.
 */
#ifndef _ASYNC_TX_H_
#define _ASYNC_TX_H_
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* on architectures without dma-mapping capabilities we need to ensure
 * that the asynchronous path compiles away
 */
#ifdef CONFIG_HAS_DMA
#define __async_inline
#else
#define __async_inline __always_inline
#endif

/**
 * struct dma_chan_ref - object used to manage dma channels received from the
 *   dmaengine core
 * @chan: the channel being tracked
 * @node: node for the channel in the device's client list
 * @rcu: for list_del_rcu
 * @count: number of times this channel is listed in the pool
 *   (for channels with multiple capabilities)
 */
struct dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
	struct rcu_head rcu;
	atomic_t count;
};

/**
 * enum async_tx_flags - modifiers for the async_* calls
 * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
 * destination address is not a source.  The asynchronous case handles this
 * implicitly, the synchronous case needs to zero the destination block.
 * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
 * also one of the source addresses.  In the synchronous case the destination
 * address is an implied source, whereas in the asynchronous case it must be
 * listed as a source.  The destination address must be the first address in
 * the source array.
 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
 * dependency chain
 * @ASYNC_TX_FENCE: specify that the next operation in the dependency
 * chain uses this operation's result as an input
 * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but xor it with the
 * input data.  Required for the rmw case.
 */
enum async_tx_flags {
	ASYNC_TX_XOR_ZERO_DST = (1 << 0),
	ASYNC_TX_XOR_DROP_DST = (1 << 1),
	ASYNC_TX_ACK = (1 << 2),
	ASYNC_TX_FENCE = (1 << 3),
	ASYNC_TX_PQ_XOR_DST = (1 << 4),
};
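
/*
 * Illustrative sketch only (not part of the api proper): choosing between
 * the xor destination flags.  'tx', 'submit', 'dest', 'srcs', 'src_cnt',
 * 'len' and 'conv' are assumed to be set up by the caller.
 *
 *	// dest is not among the sources: request a pure xor of the sources
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
 *			  conv);
 *	tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);
 *
 *	// dest doubles as srcs[0]: drop it from the source list instead
 *	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
 *			  conv);
 *	tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);
 */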

/**
 * struct async_submit_ctl - async_tx submit/completion modifiers
 * @flags: submission modifiers
 * @depend_tx: parent dependency of the current operation being submitted
 * @cb_fn: callback routine to run at operation completion
 * @cb_param: parameter for the callback routine
 * @scribble: caller provided space for dma/page address conversions
 */
struct async_submit_ctl {
	enum async_tx_flags flags;
	struct dma_async_tx_descriptor *depend_tx;
	dma_async_tx_callback cb_fn;
	void *cb_param;
	void *scribble;
};

#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH)
#define async_tx_issue_pending_all dma_issue_pending_all

/**
 * async_tx_issue_pending - send pending descriptor to the hardware channel
 * @tx: descriptor handle to retrieve hardware context
 *
 * Note: any dependent operations will have already been issued by
 * async_tx_channel_switch, or (in the case of no channel switch) will
 * be already pending on this channel.
 */
static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
	if (likely(tx)) {
		struct dma_chan *chan = tx->chan;
		struct dma_device *dma = chan->device;

		dma->device_issue_pending(chan);
	}
}
#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
#include <asm/async_tx.h>
#else
#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
	__async_tx_find_channel(dep, type)
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type);
#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
#else
/* without CONFIG_DMA_ENGINE channel lookups always fail and issue_pending
 * becomes a nop, so the async_* routines fall back to their synchronous
 * software paths
 */
static inline void async_tx_issue_pending_all(void)
{
	do { } while (0);
}

static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
	do { } while (0);
}

static inline struct dma_chan *
async_tx_find_channel(struct async_submit_ctl *submit,
		      enum dma_transaction_type tx_type, struct page **dst,
		      int dst_count, struct page **src, int src_count,
		      size_t len)
{
	return NULL;
}
#endif

/**
 * async_tx_sync_epilog - actions to take if an operation is run synchronously
 * @submit: submission modifiers holding the completion callback to invoke
 */
static inline void
async_tx_sync_epilog(struct async_submit_ctl *submit)
{
	if (submit->cb_fn)
		submit->cb_fn(submit->cb_param);
}

/* scratch element for page/dma address conversions; callers pass an array
 * of these via the scribble member of struct async_submit_ctl
 */
typedef union {
	unsigned long addr;
	struct page *page;
	dma_addr_t dma;
} addr_conv_t;

static inline void
init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
		  struct dma_async_tx_descriptor *tx,
		  dma_async_tx_callback cb_fn, void *cb_param,
		  addr_conv_t *scribble)
{
	args->flags = flags;
	args->depend_tx = tx;
	args->cb_fn = cb_fn;
	args->cb_param = cb_param;
	args->scribble = scribble;
}
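
/*
 * Usage sketch, adapted from the async_tx api documentation
 * (Documentation/crypto/async-tx-api.rst): xor, copy, then xor again as a
 * single dependency chain.  'xor_srcs', 'xor_dest', 'copy_src', 'copy_dest',
 * the lengths, 'callback' and 'cmp' are assumed caller-provided.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	addr_conv_t addr_conv[xor_src_cnt];
 *	struct async_submit_ctl submit;
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
 *			  addr_conv);
 *	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
 *
 *	submit.depend_tx = tx;
 *	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
 *			  callback, cmp, addr_conv);
 *	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
 *
 *	async_tx_issue_pending_all();
 */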

void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		     struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	  int src_cnt, size_t len, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor_offs(struct page *dest, unsigned int offset,
	       struct page **src_list, unsigned int *src_offset,
	       int src_cnt, size_t len, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_xor_val_offs(struct page *dest, unsigned int offset,
		   struct page **src_list, unsigned int *src_offset,
		   int src_cnt, size_t len, enum sum_check_flags *result,
		   struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	     unsigned int src_offset, size_t len,
	     struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt,
		   size_t len, struct async_submit_ctl *submit);
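
/*
 * A sketch of the source layout expected by the syndrome helpers: the
 * blocks array carries the data sources first, with the P and Q
 * destinations in the last two slots.  'NDISKS', 'blocks', 'offsets' and
 * 'conv' are hypothetical names used for illustration.
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	// blocks[0..NDISKS-3] = data, blocks[NDISKS-2] = P,
 *	// blocks[NDISKS-1] = Q
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, conv);
 *	tx = async_gen_syndrome(blocks, offsets, NDISKS, PAGE_SIZE, &submit);
 */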

struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   unsigned int s_off, struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
			struct page **ptrs, unsigned int *offs,
			struct async_submit_ctl *submit);

struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
			struct page **ptrs, unsigned int *offs,
			struct async_submit_ctl *submit);

/* async_tx_quiesce - ensure tx is complete and freeable upon return */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
#endif /* _ASYNC_TX_H_ */