// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-map-ops.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * Arch-specific callbacks for the generic noncoherent DMA ops, used when
 *  - hardware IOC is not available (or "dma-coherent" is not set for the
 *    device in the DT), but
 *  - both coherent and non-coherent requests from callers must still be
 *    handled.
 *
 * For DMA-coherent hardware (IOC) the generic code suffices.
 */

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page, in
	 * case it was used earlier as a normal "cached" page.
	 * Yeah, this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets a kvaddr
	 * and hence can't be used to efficiently flush the L1 and/or L2,
	 * which need a paddr.  Currently flush_cache_vmap() nukes the L1
	 * cache completely, which will be optimized in a separate commit.
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}

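/*
 * Illustrative sketch, not part of the original file: how the hook above is
 * reached.  A driver's dma_alloc_coherent() call lands in the generic
 * dma-direct allocator, which (simplified) calls arch_dma_prep_coherent()
 * on the freshly allocated pages before returning the buffer to the caller.
 * The helper name below is hypothetical and would need <linux/dma-mapping.h>.
 */
#if 0	/* example only, not compiled */
static void *example_coherent_buf(struct device *dev, dma_addr_t *handle)
{
	/*
	 * Simplified generic path:
	 *   dma_alloc_coherent()
	 *     -> dma_direct_alloc()
	 *          -> __dma_direct_alloc_pages()
	 *          -> arch_dma_prep_coherent(page, size)   <-- hook above
	 */
	return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
}
#endif
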
/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lore.kernel.org/lkml/20180518175004.GF17671@n2100.armlinux.org.uk
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument, as that is
 * done in the upper layer functions (in include/linux/dma-mapping.h).
 *
 * (An illustrative usage sketch follows the two functions below.)
 */

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed only in case of speculative CPU prefetches */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

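/*
 * Illustrative sketch, not part of the original file: a streaming-DMA
 * receive that exercises the two hooks above per the direction matrix.
 * dma_map_single(..., DMA_FROM_DEVICE) ends up in arch_sync_dma_for_device()
 * (invalidate), and dma_unmap_single() in arch_sync_dma_for_cpu()
 * (invalidate again, against speculative prefetches).  The device, buffer
 * and length are hypothetical; a real driver would include
 * <linux/dma-mapping.h>.
 */
#if 0	/* example only, not compiled */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* map for device: FROM_DEV row, "map" column -> invalidate */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... device DMAs the received data into @dma ... */

	/* unmap: FROM_DEV row, "unmap" column -> invalidate again */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}
#endif
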
/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic, keeping the caches consistent
	 * with memory - eliding the need for any explicit cache maintenance
	 * of DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
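
/*
 * Illustrative sketch, not part of the original file: how the flag set above
 * takes effect.  The generic dma-direct code checks dev_is_dma_coherent()
 * and skips the arch_sync_dma_*() calls for coherent devices; this is a
 * simplified paraphrase of kernel/dma/direct.c, not the verbatim code.
 */
#if 0	/* example only, not compiled */
static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	/* non-coherent device: do the explicit maintenance implemented above */
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);

	return phys_to_dma(dev, phys);
}
#endif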