0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright © 2006, Intel Corporation.
0004  */
0005 #ifndef _ASYNC_TX_H_
0006 #define _ASYNC_TX_H_
0007 #include <linux/dmaengine.h>
0008 #include <linux/spinlock.h>
0009 #include <linux/interrupt.h>
0010 
0011 /* on architectures without dma-mapping capabilities we need to ensure
0012  * that the asynchronous path compiles away
0013  */
0014 #ifdef CONFIG_HAS_DMA
0015 #define __async_inline
0016 #else
0017 #define __async_inline __always_inline
0018 #endif
0019 
/**
 * struct dma_chan_ref - object used to manage dma channels received from the
 *   dmaengine core.
 * @chan: the channel being tracked
 * @node: node for the channel to be placed on async_tx_master_list
 * @rcu: for list_del_rcu
 * @count: number of times this channel is listed in the pool
 *  (for channels with multiple capabilities)
 */
struct dma_chan_ref {
    struct dma_chan *chan;
    struct list_head node;
    struct rcu_head rcu;
    atomic_t count;
};
0035 
/**
 * enum async_tx_flags - modifiers for the async_* calls
 * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
 * destination address is not a source.  The asynchronous case handles this
 * implicitly, the synchronous case needs to zero the destination block.
 * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
 * also one of the source addresses.  In the synchronous case the destination
 * address is an implied source, whereas in the asynchronous case it must be
 * listed as a source.  The destination address must be the first address in
 * the source array.
 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
 * dependency chain
 * @ASYNC_TX_FENCE: specify that the next operation in the dependency
 * chain uses this operation's result as an input
 * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
 * input data. Required for rmw case.
 */
enum async_tx_flags {
    ASYNC_TX_XOR_ZERO_DST    = (1 << 0),
    ASYNC_TX_XOR_DROP_DST    = (1 << 1),
    ASYNC_TX_ACK         = (1 << 2),
    ASYNC_TX_FENCE       = (1 << 3),
    ASYNC_TX_PQ_XOR_DST  = (1 << 4),
};
0060 
/**
 * struct async_submit_ctl - async_tx submission/completion modifiers
 * @flags: submission modifiers (see enum async_tx_flags)
 * @depend_tx: parent dependency of the current operation being submitted
 * @cb_fn: callback routine to run at operation completion
 * @cb_param: parameter for the callback routine
 * @scribble: caller provided space for dma/page address conversions
 * (an array of addr_conv_t; see init_async_submit())
 */
struct async_submit_ctl {
    enum async_tx_flags flags;
    struct dma_async_tx_descriptor *depend_tx;
    dma_async_tx_callback cb_fn;
    void *cb_param;
    void *scribble;
};
0076 
0077 #if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH)
0078 #define async_tx_issue_pending_all dma_issue_pending_all
0079 
0080 /**
0081  * async_tx_issue_pending - send pending descriptor to the hardware channel
0082  * @tx: descriptor handle to retrieve hardware context
0083  *
0084  * Note: any dependent operations will have already been issued by
0085  * async_tx_channel_switch, or (in the case of no channel switch) will
0086  * be already pending on this channel.
0087  */
0088 static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
0089 {
0090     if (likely(tx)) {
0091         struct dma_chan *chan = tx->chan;
0092         struct dma_device *dma = chan->device;
0093 
0094         dma->device_issue_pending(chan);
0095     }
0096 }
0097 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
0098 #include <asm/async_tx.h>
0099 #else
0100 #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
0101      __async_tx_find_channel(dep, type)
0102 struct dma_chan *
0103 __async_tx_find_channel(struct async_submit_ctl *submit,
0104             enum dma_transaction_type tx_type);
0105 #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
0106 #else
/* nop when the dmaengine fast path above is not compiled in */
static inline void async_tx_issue_pending_all(void)
{
}
0111 
/* nop stub: no hardware channel exists to kick in this configuration */
static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
{
    (void)tx;
}
0116 
/* Fallback when the dmaengine path above is not compiled in: report that no
 * channel is available so callers take the synchronous code paths.
 */
static inline struct dma_chan *
async_tx_find_channel(struct async_submit_ctl *submit,
              enum dma_transaction_type tx_type, struct page **dst,
              int dst_count, struct page **src, int src_count,
              size_t len)
{
    return NULL;
}
0125 #endif
0126 
0127 /**
0128  * async_tx_sync_epilog - actions to take if an operation is run synchronously
0129  * @cb_fn: function to call when the transaction completes
0130  * @cb_fn_param: parameter to pass to the callback routine
0131  */
0132 static inline void
0133 async_tx_sync_epilog(struct async_submit_ctl *submit)
0134 {
0135     if (submit->cb_fn)
0136         submit->cb_fn(submit->cb_param);
0137 }
0138 
/* addr_conv_t - one scratch slot for dma/page address conversions; sized to
 * hold whichever representation (raw address, struct page pointer, or dma
 * address) a conversion step needs.  Arrays of these back the @scribble
 * member of struct async_submit_ctl.
 */
typedef union {
    unsigned long addr;
    struct page *page;
    dma_addr_t dma;
} addr_conv_t;
0144 
0145 static inline void
0146 init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
0147           struct dma_async_tx_descriptor *tx,
0148           dma_async_tx_callback cb_fn, void *cb_param,
0149           addr_conv_t *scribble)
0150 {
0151     args->flags = flags;
0152     args->depend_tx = tx;
0153     args->cb_fn = cb_fn;
0154     args->cb_param = cb_param;
0155     args->scribble = scribble;
0156 }
0157 
/* Attach @tx to @chan, honoring the dependency/callback state in @submit. */
void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
             struct async_submit_ctl *submit);

/* XOR @src_cnt source pages into @dest, all at the same @offset, @len bytes. */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
      int src_cnt, size_t len, struct async_submit_ctl *submit);

/* As async_xor(), but with a per-source offset array @src_offset. */
struct dma_async_tx_descriptor *
async_xor_offs(struct page *dest, unsigned int offset,
        struct page **src_list, unsigned int *src_offset,
        int src_cnt, size_t len, struct async_submit_ctl *submit);

/* Check the XOR of @src_cnt sources against @dest; outcome lands in *@result. */
struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
          int src_cnt, size_t len, enum sum_check_flags *result,
          struct async_submit_ctl *submit);

/* As async_xor_val(), but with a per-source offset array @src_offset. */
struct dma_async_tx_descriptor *
async_xor_val_offs(struct page *dest, unsigned int offset,
        struct page **src_list, unsigned int *src_offset,
        int src_cnt, size_t len, enum sum_check_flags *result,
        struct async_submit_ctl *submit);

/* Copy @len bytes from @src+@src_offset to @dest+@dest_offset. */
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
         unsigned int src_offset, size_t len,
         struct async_submit_ctl *submit);

/* Schedule a no-op descriptor whose only effect is running @submit's
 * callback once its dependencies complete.
 */
struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);

/* Generate a raid6 P/Q syndrome over @src_cnt blocks of @len bytes.
 * NOTE(review): P/Q semantics inferred from the raid6 recovery helpers
 * below — confirm against crypto/async_tx/async_pq.c.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt,
           size_t len, struct async_submit_ctl *submit);

/* Validate an existing syndrome; result in *@pqres, @spare/@s_off is
 * caller-provided scratch.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt,
           size_t len, enum sum_check_flags *pqres, struct page *spare,
           unsigned int s_off, struct async_submit_ctl *submit);

/* Recover the two failed blocks @faila and @failb of a raid6 stripe. */
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
            struct page **ptrs, unsigned int *offs,
            struct async_submit_ctl *submit);

/* Recover a single failed data block @faila (and P) of a raid6 stripe. */
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
            struct page **ptrs, unsigned int *offs,
            struct async_submit_ctl *submit);

/* Wait for *@tx to complete; NOTE(review): blocking semantics inferred from
 * the name — confirm in crypto/async_tx/async_tx.c.
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
0208 #endif /* _ASYNC_TX_H_ */