// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware-maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this condition.
 * However, this function is only called on non-I/O-coherent systems, and only
 * the R10000 and R12000 are used in such systems: the SGI IP28 (Indigo²) and
 * the SGI IP32 (O2), respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
    switch (boot_cpu_type()) {
    case CPU_R10000:
    case CPU_R12000:
    case CPU_BMIPS5000:
    case CPU_LOONGSON2EF:
    case CPU_XBURST:
        return true;
    default:
        /*
         * Presence of MAARs suggests that the CPU supports
         * speculatively prefetching data, and therefore requires
         * the post-DMA flush/invalidate.
         */
        return cpu_has_maar;
    }
}

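/*
 * Illustration of why the post-DMA flush above matters, using the usual
 * streaming-DMA pattern.  While a device writes into a mapped buffer, a
 * speculating CPU may refetch stale cachelines covering that buffer; the
 * for_cpu sync discards them before the driver reads the data.  This is a
 * hypothetical driver fragment, not code from this file; "mydev", "rx_buf",
 * "len" and "process()" are made-up names:
 *
 *    dma_addr_t dma = dma_map_single(mydev, rx_buf, len, DMA_FROM_DEVICE);
 *    if (dma_mapping_error(mydev, dma))
 *        return -ENOMEM;
 *    ...the device writes into rx_buf via DMA...
 *    dma_unmap_single(mydev, dma, len, DMA_FROM_DEVICE);
 *    process(rx_buf);
 *
 * dma_unmap_single() ends up in arch_sync_dma_for_cpu() below, which performs
 * the extra invalidate only when cpu_needs_post_dma_flush() is true.
 */
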
void arch_dma_prep_coherent(struct page *page, size_t size)
{
    dma_cache_wback_inv((unsigned long)page_address(page), size);
}

void *arch_dma_set_uncached(void *addr, size_t size)
{
    return (void *)(__pa(addr) + UNCAC_BASE);
}

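/*
 * Note on arch_dma_set_uncached() above (descriptive note, not from the
 * original source): the uncached alias is formed by adding the buffer's
 * physical address to UNCAC_BASE, the base of the MIPS uncached window
 * (KSEG1 on 32-bit kernels, an uncached XKPHYS segment on 64-bit kernels).
 * As a rough example, assuming a 32-bit configuration where UNCAC_BASE is
 * 0xa0000000, a buffer at physical address 0x01200000 comes back as the
 * uncached virtual address 0xa1200000.
 */
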
static inline void dma_sync_virt_for_device(void *addr, size_t size,
        enum dma_data_direction dir)
{
    switch (dir) {
    case DMA_TO_DEVICE:
        dma_cache_wback((unsigned long)addr, size);
        break;
    case DMA_FROM_DEVICE:
        dma_cache_inv((unsigned long)addr, size);
        break;
    case DMA_BIDIRECTIONAL:
        dma_cache_wback_inv((unsigned long)addr, size);
        break;
    default:
        BUG();
    }
}

static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
        enum dma_data_direction dir)
{
    switch (dir) {
    case DMA_TO_DEVICE:
        break;
    case DMA_FROM_DEVICE:
    case DMA_BIDIRECTIONAL:
        dma_cache_inv((unsigned long)addr, size);
        break;
    default:
        BUG();
    }
}

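/*
 * Summary of the cache maintenance chosen by the two helpers above
 * (descriptive note, not from the original source):
 *
 *    direction          for_device               for_cpu
 *    DMA_TO_DEVICE      writeback                nothing
 *    DMA_FROM_DEVICE    invalidate               invalidate
 *    DMA_BIDIRECTIONAL  writeback + invalidate   invalidate
 *
 * The for_cpu invalidate is only reached on CPUs for which
 * cpu_needs_post_dma_flush() returns true, to drop cachelines that may have
 * been speculatively refilled while the device was writing to memory.
 */
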
/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
        enum dma_data_direction dir, bool for_device)
{
    struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
    unsigned long offset = paddr & ~PAGE_MASK;
    size_t left = size;

    do {
        size_t len = left;
        void *addr;

        if (PageHighMem(page)) {
            if (offset + len > PAGE_SIZE)
                len = PAGE_SIZE - offset;
        }

        addr = kmap_atomic(page);
        if (for_device)
            dma_sync_virt_for_device(addr + offset, len, dir);
        else
            dma_sync_virt_for_cpu(addr + offset, len, dir);
        kunmap_atomic(addr);

        offset = 0;
        page++;
        left -= len;
    } while (left);
}

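/*
 * Worked example for the loop above (descriptive note, not from the original
 * source): with 4 KiB pages, a 6 KiB buffer starting 2 KiB into a highmem
 * page is synced in two passes: the remaining 2 KiB of the first page, then
 * the full 4 KiB of the following page, each under its own kmap_atomic()
 * mapping.  For lowmem pages no clamping happens, so the whole physically
 * contiguous range is synced in a single call.
 */
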
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
        enum dma_data_direction dir)
{
    dma_sync_phys(paddr, size, dir, true);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
        enum dma_data_direction dir)
{
    if (cpu_needs_post_dma_flush())
        dma_sync_phys(paddr, size, dir, false);
}
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        const struct iommu_ops *iommu, bool coherent)
{
    dev->dma_coherent = coherent;
}
#endif
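
/*
 * Note on arch_setup_dma_ops() above (descriptive note, not from the original
 * source): the generic DMA code calls this hook while configuring a device;
 * on devicetree platforms that typically happens from of_dma_configure(),
 * with "coherent" reflecting a "dma-coherent" property.  Recording the flag
 * in dev->dma_coherent lets dma_map_single() and friends skip the cache
 * maintenance above for devices that are hardware I/O-coherent.
 */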