// SPDX-License-Identifier: GPL-2.0-only
/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *  Dan Williams <dan.j.williams@intel.com>
 *
 *  with architecture considerations by:
 *  Neil Brown <neilb@suse.de>
 *  Jeff Garzik <jeff@garzik.org>
 */
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
    async_dmaengine_get();

    printk(KERN_INFO "async_tx: api initialized (async)\n");

    return 0;
}

static void __exit async_tx_exit(void)
{
    async_dmaengine_put();
}

module_init(async_tx_init);
module_exit(async_tx_exit);

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *  the transaction execute synchronously
 * @submit: transaction dependency and submission modifiers
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
            enum dma_transaction_type tx_type)
{
    struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

    /* see if we can keep the chain on one channel */
    if (depend_tx &&
        dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
        return depend_tx->chan;
    return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#endif
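
/*
 * Usage sketch (illustrative, not verbatim from any one caller): the async_*
 * operations resolve a channel via the async_tx_find_channel() wrapper and
 * fall back to a synchronous implementation when no capable channel is
 * available.  async_memcpy(), for example, does roughly:
 *
 *     struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
 *                                                   &dest, 1, &src, 1, len);
 *     if (chan)
 *         ...prepare and submit a DMA_MEMCPY descriptor on chan...
 *     else
 *         ...copy the pages synchronously with memcpy()...
 */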


/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *  pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
            struct dma_async_tx_descriptor *tx)
{
    struct dma_chan *chan = depend_tx->chan;
    struct dma_device *device = chan->device;
    struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

    /* first check to see if we can still append to depend_tx */
    txd_lock(depend_tx);
    if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
        txd_chain(depend_tx, tx);
        intr_tx = NULL;
    }
    txd_unlock(depend_tx);

    /* attached dependency, flush the parent channel */
    if (!intr_tx) {
        device->device_issue_pending(chan);
        return;
    }

    /* see if we can schedule an interrupt
     * otherwise poll for completion
     */
    if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
        intr_tx = device->device_prep_dma_interrupt(chan, 0);
    else
        intr_tx = NULL;

    if (intr_tx) {
        intr_tx->callback = NULL;
        intr_tx->callback_param = NULL;
        /* safe to chain outside the lock since we know we are
         * not submitted yet
         */
        txd_chain(intr_tx, tx);

        /* check if we need to append */
        txd_lock(depend_tx);
        if (txd_parent(depend_tx)) {
            txd_chain(depend_tx, intr_tx);
            async_tx_ack(intr_tx);
            intr_tx = NULL;
        }
        txd_unlock(depend_tx);

        if (intr_tx) {
            txd_clear_parent(intr_tx);
            intr_tx->tx_submit(intr_tx);
            async_tx_ack(intr_tx);
        }
        device->device_issue_pending(chan);
    } else {
        if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
            panic("%s: DMA error waiting for depend_tx\n",
                  __func__);
        tx->tx_submit(tx);
    }
}
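
/*
 * Sketch of the chain built above (illustrative): if depend_tx runs on
 * channel A and tx must run on channel B, the interrupt descriptor bridges
 * the two channels:
 *
 *     depend_tx (chan A) ---> intr_tx (chan A) ---> tx (chan B)
 *
 * completion of intr_tx on channel A triggers dependency resolution, which
 * then submits tx on channel B.
 */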


/**
 * submit_disposition - flags for routing an incoming operation
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 *
 * while holding depend_tx->lock we must avoid submitting new operations
 * to prevent a circular locking dependency with drivers that already
 * hold a channel lock when calling async_tx_run_dependencies.
 */
enum submit_disposition {
    ASYNC_TX_SUBMITTED,
    ASYNC_TX_CHANNEL_SWITCH,
    ASYNC_TX_DIRECT_SUBMIT,
};

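/**
 * async_tx_submit - attach a descriptor to its dependency chain and submit it
 * @chan: channel selected to run the new operation
 * @tx: descriptor returned by the driver's prep routine
 * @submit: submission modifiers; may carry a @depend_tx parent and a callback
 */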
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
        struct async_submit_ctl *submit)
{
    struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

    tx->callback = submit->cb_fn;
    tx->callback_param = submit->cb_param;

    if (depend_tx) {
        enum submit_disposition s;

        /* sanity check the dependency chain:
         * 1/ if ack is already set then we cannot be sure
         * we are referring to the correct operation
         * 2/ dependencies are 1:1 i.e. two transactions can
         * not depend on the same parent
         */
        BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
               txd_parent(tx));

        /* the lock prevents async_tx_run_dependencies from missing
         * the setting of ->next when ->parent != NULL
         */
        txd_lock(depend_tx);
        if (txd_parent(depend_tx)) {
            /* we have a parent so we can not submit directly
             * if we are staying on the same channel: append
             * else: channel switch
             */
            if (depend_tx->chan == chan) {
                txd_chain(depend_tx, tx);
                s = ASYNC_TX_SUBMITTED;
            } else
                s = ASYNC_TX_CHANNEL_SWITCH;
        } else {
            /* we do not have a parent so we may be able to submit
             * directly if we are staying on the same channel
             */
            if (depend_tx->chan == chan)
                s = ASYNC_TX_DIRECT_SUBMIT;
            else
                s = ASYNC_TX_CHANNEL_SWITCH;
        }
        txd_unlock(depend_tx);

        switch (s) {
        case ASYNC_TX_SUBMITTED:
            break;
        case ASYNC_TX_CHANNEL_SWITCH:
            async_tx_channel_switch(depend_tx, tx);
            break;
        case ASYNC_TX_DIRECT_SUBMIT:
            txd_clear_parent(tx);
            tx->tx_submit(tx);
            break;
        }
    } else {
        txd_clear_parent(tx);
        tx->tx_submit(tx);
    }

    if (submit->flags & ASYNC_TX_ACK)
        async_tx_ack(tx);

    if (depend_tx)
        async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
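
/*
 * Usage sketch (illustrative; dest, src, dest2, complete_cb and ctx are
 * placeholders): clients normally do not call async_tx_submit() directly.
 * They describe each operation with an async_submit_ctl and let the async_*
 * helpers submit the descriptor, chaining on the previous transaction:
 *
 *     struct async_submit_ctl submit;
 *     struct dma_async_tx_descriptor *tx;
 *
 *     init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
 *     tx = async_memcpy(dest, src, 0, 0, PAGE_SIZE, &submit);
 *
 *     init_async_submit(&submit, ASYNC_TX_ACK, tx, complete_cb, ctx, NULL);
 *     tx = async_memcpy(dest2, dest, 0, 0, PAGE_SIZE, &submit);
 */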

/**
 * async_trigger_callback - schedules the callback function to be run
 * @submit: submission and completion parameters
 *
 * honored flags: ASYNC_TX_ACK
 *
 * The callback is run after any dependent operations have completed.
 */
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
    struct dma_chan *chan;
    struct dma_device *device;
    struct dma_async_tx_descriptor *tx;
    struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

    if (depend_tx) {
        chan = depend_tx->chan;
        device = chan->device;

        /* see if we can schedule an interrupt
         * otherwise poll for completion
         */
        if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
            device = NULL;

        tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
    } else
        tx = NULL;

    if (tx) {
        pr_debug("%s: (async)\n", __func__);

        async_tx_submit(chan, tx, submit);
    } else {
        pr_debug("%s: (sync)\n", __func__);

        /* wait for any prerequisite operations */
        async_tx_quiesce(&submit->depend_tx);

        async_tx_sync_epilog(submit);
    }

    return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
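
/*
 * Usage sketch (illustrative; complete_cb is a placeholder callback that
 * calls complete() on the struct completion passed as cb_param): a client
 * can append a completion notification to the end of a dependency chain:
 *
 *     struct completion cmp;
 *
 *     init_completion(&cmp);
 *     init_async_submit(&submit, ASYNC_TX_ACK, tx, complete_cb, &cmp, NULL);
 *     tx = async_trigger_callback(&submit);
 *
 *     async_tx_issue_pending_all();
 *     wait_for_completion(&cmp);
 */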

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
    if (*tx) {
        /* if ack is already set then we cannot be sure
         * we are referring to the correct operation
         */
        BUG_ON(async_tx_test_ack(*tx));
        if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
            panic("%s: DMA error waiting for transaction\n",
                  __func__);
        async_tx_ack(*tx);
        *tx = NULL;
    }
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
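
/*
 * Usage sketch (illustrative; do_sync_xxx() stands in for whatever
 * synchronous fallback a caller implements): synchronous paths quiesce the
 * pending dependency before touching the buffers, then run the epilog:
 *
 *     if (!chan) {
 *         async_tx_quiesce(&submit->depend_tx);
 *         do_sync_xxx(...);
 *         async_tx_sync_epilog(submit);
 *     }
 */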

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");