
0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * copy offload engine support
0004  *
0005  * Copyright © 2006, Intel Corporation.
0006  *
0007  *      Dan Williams <dan.j.williams@intel.com>
0008  *
0009  *      with architecture considerations by:
0010  *      Neil Brown <neilb@suse.de>
0011  *      Jeff Garzik <jeff@garzik.org>
0012  */
0013 #include <linux/kernel.h>
0014 #include <linux/highmem.h>
0015 #include <linux/module.h>
0016 #include <linux/mm.h>
0017 #include <linux/dma-mapping.h>
0018 #include <linux/async_tx.h>
0019 
0020 /**
0021  * async_memcpy - attempt to copy memory with a dma engine.
0022  * @dest: destination page
0023  * @src: src page
0024  * @dest_offset: offset into 'dest' to start transaction
0025  * @src_offset: offset into 'src' to start transaction
0026  * @len: length in bytes
0027  * @submit: submission / completion modifiers
0028  *
0029  * honored flags: ASYNC_TX_ACK
0030  */
0031 struct dma_async_tx_descriptor *
0032 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
0033          unsigned int src_offset, size_t len,
0034          struct async_submit_ctl *submit)
0035 {
0036     struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
0037                               &dest, 1, &src, 1, len);
0038     struct dma_device *device = chan ? chan->device : NULL;
0039     struct dma_async_tx_descriptor *tx = NULL;
0040     struct dmaengine_unmap_data *unmap = NULL;
0041 
0042     if (device)
0043         unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
0044 
0045     if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
0046         unsigned long dma_prep_flags = 0;
0047 
0048         if (submit->cb_fn)
0049             dma_prep_flags |= DMA_PREP_INTERRUPT;
0050         if (submit->flags & ASYNC_TX_FENCE)
0051             dma_prep_flags |= DMA_PREP_FENCE;
0052 
0053         unmap->to_cnt = 1;
0054         unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
0055                           DMA_TO_DEVICE);
0056         unmap->from_cnt = 1;
0057         unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
0058                           DMA_FROM_DEVICE);
0059         unmap->len = len;
0060 
0061         tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
0062                             unmap->addr[0], len,
0063                             dma_prep_flags);
0064     }
0065 
0066     if (tx) {
0067         pr_debug("%s: (async) len: %zu\n", __func__, len);
0068 
0069         dma_set_unmap(tx, unmap);
0070         async_tx_submit(chan, tx, submit);
0071     } else {
0072         void *dest_buf, *src_buf;
0073         pr_debug("%s: (sync) len: %zu\n", __func__, len);
0074 
0075         /* wait for any prerequisite operations */
0076         async_tx_quiesce(&submit->depend_tx);
0077 
0078         dest_buf = kmap_atomic(dest) + dest_offset;
0079         src_buf = kmap_atomic(src) + src_offset;
0080 
0081         memcpy(dest_buf, src_buf, len);
0082 
0083         kunmap_atomic(src_buf);
0084         kunmap_atomic(dest_buf);
0085 
0086         async_tx_sync_epilog(submit);
0087     }
0088 
0089     dmaengine_unmap_put(unmap);
0090 
0091     return tx;
0092 }
/* make async_memcpy() available to GPL-compatible kernel modules */
EXPORT_SYMBOL_GPL(async_memcpy);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL");