#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>
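/*
 * Make the given kernel virtual address range coherent for DMA by
 * cleaning and/or invalidating the data cache as required by the
 * transfer direction.
 */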
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
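		/*
		 * Invalidate only when the range is cache-line aligned;
		 * otherwise flush, since a pure invalidate could discard
		 * uncommitted data sharing the partial cache lines.
		 */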
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(start, end);
		break;
	}
}

#ifdef CONFIG_HIGHMEM
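/*
 * __dma_sync_page() helper for highmem systems: each page of the
 * buffer must be mapped to obtain a kernel virtual address before
 * __dma_sync() can operate on it. kmap_atomic()/kunmap_atomic() are
 * used so this does not sleep, and the buffer may span several pages.
 */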
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1) / PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
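		/* Sync this buffer segment */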
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

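		/* Calculate next buffer segment size */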
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

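		/* Add the segment size to our running total */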
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif
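/*
 * __dma_sync_page() makes memory consistent, like __dma_sync(), but
 * takes a physical address instead of a kernel virtual address, so it
 * also handles highmem pages.
 */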
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned int offset = paddr & ~PAGE_MASK;

#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, dir);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;

	__dma_sync((void *)start, size, dir);
#endif
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	flush_dcache_range(kaddr, kaddr + size);
}