Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  PowerPC version derived from arch/arm/mm/consistent.c
0004  *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
0005  *
0006  *  Copyright (C) 2000 Russell King
0007  */
0008 
0009 #include <linux/kernel.h>
0010 #include <linux/errno.h>
0011 #include <linux/types.h>
0012 #include <linux/highmem.h>
0013 #include <linux/dma-direct.h>
0014 #include <linux/dma-map-ops.h>
0015 
0016 #include <asm/tlbflush.h>
0017 #include <asm/dma.h>
0018 
0019 /*
0020  * make an area consistent.
0021  */
0022 static void __dma_sync(void *vaddr, size_t size, int direction)
0023 {
0024     unsigned long start = (unsigned long)vaddr;
0025     unsigned long end   = start + size;
0026 
0027     switch (direction) {
0028     case DMA_NONE:
0029         BUG();
0030     case DMA_FROM_DEVICE:
0031         /*
0032          * invalidate only when cache-line aligned otherwise there is
0033          * the potential for discarding uncommitted data from the cache
0034          */
0035         if ((start | end) & (L1_CACHE_BYTES - 1))
0036             flush_dcache_range(start, end);
0037         else
0038             invalidate_dcache_range(start, end);
0039         break;
0040     case DMA_TO_DEVICE:     /* writeback only */
0041         clean_dcache_range(start, end);
0042         break;
0043     case DMA_BIDIRECTIONAL: /* writeback and invalidate */
0044         flush_dcache_range(start, end);
0045         break;
0046     }
0047 }
0048 
0049 #ifdef CONFIG_HIGHMEM
0050 /*
0051  * __dma_sync_page() implementation for systems using highmem.
0052  * In this case, each page of a buffer must be kmapped/kunmapped
0053  * in order to have a virtual address for __dma_sync(). This must
0054  * not sleep so kmap_atomic()/kunmap_atomic() are used.
0055  *
0056  * Note: yes, it is possible and correct to have a buffer extend
0057  * beyond the first page.
0058  */
0059 static inline void __dma_sync_page_highmem(struct page *page,
0060         unsigned long offset, size_t size, int direction)
0061 {
0062     size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
0063     size_t cur_size = seg_size;
0064     unsigned long flags, start, seg_offset = offset;
0065     int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
0066     int seg_nr = 0;
0067 
0068     local_irq_save(flags);
0069 
0070     do {
0071         start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
0072 
0073         /* Sync this buffer segment */
0074         __dma_sync((void *)start, seg_size, direction);
0075         kunmap_atomic((void *)start);
0076         seg_nr++;
0077 
0078         /* Calculate next buffer segment size */
0079         seg_size = min((size_t)PAGE_SIZE, size - cur_size);
0080 
0081         /* Add the segment size to our running total */
0082         cur_size += seg_size;
0083         seg_offset = 0;
0084     } while (seg_nr < nr_segs);
0085 
0086     local_irq_restore(flags);
0087 }
0088 #endif /* CONFIG_HIGHMEM */
0089 
0090 /*
0091  * __dma_sync_page makes memory consistent. identical to __dma_sync, but
0092  * takes a struct page instead of a virtual address
0093  */
0094 static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
0095 {
0096     struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
0097     unsigned offset = paddr & ~PAGE_MASK;
0098 
0099 #ifdef CONFIG_HIGHMEM
0100     __dma_sync_page_highmem(page, offset, size, dir);
0101 #else
0102     unsigned long start = (unsigned long)page_address(page) + offset;
0103     __dma_sync((void *)start, size, dir);
0104 #endif
0105 }
0106 
/*
 * dma-mapping hook: make @size bytes at physical address @paddr cache
 * consistent before the device touches them.  Delegates to
 * __dma_sync_page(), which picks the cache operation from @dir.
 */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
        enum dma_data_direction dir)
{
    __dma_sync_page(paddr, size, dir);
}
0112 
/*
 * dma-mapping hook: make @size bytes at physical address @paddr cache
 * consistent before the CPU reads them back after a DMA transfer.
 * Same implementation as arch_sync_dma_for_device() on this arch.
 */
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
        enum dma_data_direction dir)
{
    __dma_sync_page(paddr, size, dir);
}
0118 
0119 void arch_dma_prep_coherent(struct page *page, size_t size)
0120 {
0121     unsigned long kaddr = (unsigned long)page_address(page);
0122 
0123     flush_dcache_range(kaddr, kaddr + size);
0124 }