#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The CPUs handled in cpu_needs_post_dma_flush() below can speculatively
 * fill cache lines while a device is writing memory, so DMA from a device
 * must be followed by an extra cache invalidation before the CPU reads
 * the buffer.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
	case CPU_XBURST:
		return true;
	default:
		/*
		 * CPUs that implement MAARs can speculatively prefetch from
		 * memory, so they also need the post-DMA cache invalidation.
		 */
		return cpu_has_maar;
	}
}

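/*
 * Write back and invalidate a buffer that the generic DMA layer is about
 * to hand out as a coherent/uncached allocation, so no dirty cache lines
 * get written back over it later.
 */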
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

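/*
 * Return an uncached alias of the buffer: on MIPS the physical address
 * offset by UNCAC_BASE lands in the uncached address segment.
 */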
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

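/*
 * Cache maintenance before the device touches the buffer: write back
 * dirty lines the device will read, invalidate lines covering memory the
 * device will write.
 */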
static inline void dma_sync_virt_for_device(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

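/*
 * Cache maintenance after the device is done: invalidate anything the CPU
 * may have cached or speculatively fetched while the device was writing.
 * Nothing to do for DMA_TO_DEVICE, since memory was not modified.
 */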
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

/*
 * A physically contiguous range may span several pages.  Highmem pages
 * have to be mapped and processed one page at a time; for a lowmem page
 * the kmap_atomic() below reduces to page_address() and the whole range
 * is handled in a single pass.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}

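/*
 * Called by the DMA API before ownership of a streaming buffer passes
 * from the CPU to the device.
 */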
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir, true);
}

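/*
 * Called by the DMA API when the CPU takes a streaming buffer back from
 * the device; only CPUs that can speculatively fill cache lines during
 * the DMA need any work here.
 */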
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir, false);
}
#endif

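/*
 * Record whether the device is DMA-coherent (as described by firmware or
 * devicetree) so the generic DMA code can skip cache maintenance for
 * coherent devices.
 */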
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif