/*
 * swiotlb-xen: software IOTLB (bounce buffer) based DMA ops for Xen guests.
 *
 * Under Xen, the guest's pseudo-physical page frame numbers (PFNs) need not
 * map to contiguous machine frame numbers (BFNs): the hypervisor stitches
 * guest memory together from different pools, so PFN == BFN and
 * PFN + 1 == BFN + 1 cannot be assumed.  Devices, however, master DMA with
 * machine addresses.  Buffers that are not machine-contiguous, or that lie
 * outside a device's DMA window, must therefore either be exchanged for a
 * machine-contiguous region or bounced through the swiotlb.
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>

#include <trace/events/swiotlb.h>

#define MAX_DMA_BITS 32

/*
 * Translate a guest physical address to the corresponding bus (machine)
 * address: only the frame number is translated, the offset within the
 * Xen page is preserved.
 */
static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}
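
/*
 * Worked example with hypothetical numbers: assuming XEN_PAGE_SHIFT == 12,
 * paddr 0x12345 yields XEN_PFN_DOWN(paddr) == 0x12; if pfn_to_bfn(0x12)
 * returned 0x99, the bus address would be (0x99 << 12) | 0x345 == 0x99345.
 */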

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

/*
 * Return 1 if the physical range [p, p + size) is not contiguous in
 * machine address space, i.e. if consecutive guest frames in the range
 * map to non-consecutive backend frames.
 */
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}
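
/*
 * Illustrative example with hypothetical numbers: for p == 0x1f00 and
 * size == 0x200 with 4 KiB Xen pages, the range spans guest frames 0x1
 * and 0x2.  If pfn_to_bfn(0x1) == 0x10 but pfn_to_bfn(0x2) == 0x37, the
 * backing machine memory is discontiguous and this returns 1.
 */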

static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/*
	 * If the address is outside our domain, it CAN have the same virtual
	 * address as another address in our domain.  Therefore _only_ check
	 * addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return is_swiotlb_buffer(dev, paddr);
	return 0;
}

#ifdef CONFIG_X86
int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
	BUG_ON(nslabs % IO_TLB_SEGSIZE);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}
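
/*
 * Note on the nested loops above: each IO_TLB_SEGSIZE-slab segment of the
 * swiotlb is exchanged for machine-contiguous memory.  The inner loop
 * first asks for a region addressable with order + PAGE_SHIFT bits and,
 * on failure, retries with a progressively looser address restriction
 * until MAX_DMA_BITS is reached.
 */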

static void *
xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
	int order = get_order(size);
	phys_addr_t phys;
	void *ret;

	/* Align the allocation to the Xen page size */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	ret = (void *)__get_free_pages(flags, get_order(size));
	if (!ret)
		return ret;
	phys = virt_to_phys(ret);

	*dma_handle = xen_phys_to_dma(dev, phys);
	if (*dma_handle + size - 1 > dma_mask ||
	    range_straddles_page_boundary(phys, size)) {
		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
				dma_handle) != 0)
			goto out_free_pages;
		SetPageXenRemapped(virt_to_page(ret));
	}

	memset(ret, 0, size);
	return ret;

out_free_pages:
	free_pages((unsigned long)ret, get_order(size));
	return NULL;
}

static void
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t phys = virt_to_phys(vaddr);
	int order = get_order(size);

	/* Convert the size to the actually allocated size. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
		return;

	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);
	free_pages((unsigned long)vaddr, get_order(size));
}
#endif
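
/*
 * The coherent allocation helpers above are only needed on x86, where PV
 * guests may have to exchange guest-contiguous memory for machine-
 * contiguous memory via xen_create_contiguous_region().  On other
 * architectures the ops table at the bottom of this file falls back to
 * dma_direct_alloc()/dma_direct_free().
 */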

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * DMA address to hand to the device is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either xen_swiotlb_unmap_page or a sync-for-cpu is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);

	/*
	 * If the address happens to be in the device's DMA window and the
	 * buffer is machine-contiguous, we can safely return the device
	 * address without bounce buffering.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !is_swiotlb_force_bounce(dev))
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size);

	map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble.
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to not
 * produce any errors.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}
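
/*
 * Note the mirrored ordering above: syncing for the device copies any
 * bounced data into the swiotlb slot first and performs cache maintenance
 * afterwards, while syncing for the cpu does the cache maintenance first
 * and only then copies bounced data back to the original buffer.
 */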

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return -EIO;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
#ifdef CONFIG_X86
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
#else
	.alloc = dma_direct_alloc,
	.free = dma_direct_free,
#endif
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
};
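
/*
 * Usage sketch (illustrative only, not part of this file): drivers never
 * call these ops directly.  Once a device's dma_ops point at
 * xen_swiotlb_dma_ops, the generic DMA API dispatches here.  The sizes
 * and variable names below are hypothetical.
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
 *	// -> .alloc (xen_swiotlb_alloc_coherent on x86)
 *	...
 *	dma_free_coherent(dev, 4096, buf, handle);
 *
 *	// Streaming mapping; may bounce through the swiotlb:
 *	dma_addr_t addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;		// -> .map_page failed
 *	...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);	// -> .unmap_page
 */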