// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/gfp.h>
#include <linux/cache.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <xen/xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/xen-ops.h>
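
/*
 * Called before the device performs DMA to/from the buffer: write dirty
 * cache lines back to the Point of Coherency so the device observes the
 * CPU's data and no dirty line can later be evicted over data the device
 * writes.
 */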
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);

	dcache_clean_poc(start, start + size);
}
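
/*
 * Called after the device has written to the buffer and before the CPU
 * reads it: invalidate stale cache lines so the CPU fetches the DMA'd
 * data from memory.
 */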
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);

	/* The device did not write to the buffer, so no invalidation needed */
	if (dir == DMA_TO_DEVICE)
		return;

	dcache_inval_poc(start, start + size);
}
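
/*
 * Make a page safe to expose through a non-cacheable mapping: clean and
 * invalidate its linear-map alias to the Point of Coherency so no stale
 * or dirty cache lines shadow the coherent buffer.
 */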
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long start = (unsigned long)page_address(page);

	dcache_clean_inval_poc(start, start + size);
}

#ifdef CONFIG_IOMMU_DMA
void arch_teardown_dma_ops(struct device *dev)
{
	/* Remove per-device DMA ops installed by iommu_setup_dma_ops() */
	dev->dma_ops = NULL;
}
#endif
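
/*
 * Called when a device is added to its bus: record whether it is
 * cache-coherent and install IOMMU- or Xen-specific dma_map_ops as
 * required.
 */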
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	int cls = cache_line_size_of_cpu();

	/*
	 * Non-coherent devices depend on DMA buffers not sharing cache
	 * writeback granules with unrelated data; taint if CTR_EL0.CWG
	 * reports a granule larger than ARCH_DMA_MINALIGN.
	 */
	WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, cls);

	dev->dma_coherent = coherent;
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);

	/* Xen may install its own (e.g. swiotlb-xen) DMA ops on top */
	xen_setup_dma_ops(dev);
}