// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

bool dma_default_coherent;

/*
 * Managed DMA API
 */
struct dma_devres {
        size_t size;
        void *vaddr;
        dma_addr_t dma_handle;
        unsigned long attrs;
};

static void dmam_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
                       this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
        struct dma_devres *this = res, *match = match_data;

        if (this->vaddr == match->vaddr) {
                WARN_ON(this->size != match->size ||
                        this->dma_handle != match->dma_handle);
                return 1;
        }
        return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed counterpart of dma_free_coherent(): frees the buffer and removes
 * the matching devres entry registered by dmam_alloc_attrs().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        dma_free_coherent(dev, size, vaddr, dma_handle);
        WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;
        dr->attrs = attrs;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
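
/*
 * Example (illustrative sketch, not part of the API above): a driver can let
 * devres release the buffer on detach instead of freeing it in its remove
 * path.  The "foo" names below are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *foo;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *		foo->ring = dmam_alloc_attrs(&pdev->dev, SZ_4K, &foo->ring_dma,
 *					     GFP_KERNEL, 0);
 *		if (!foo->ring)
 *			return -ENOMEM;
 *		return 0;	(the ring buffer is freed automatically on detach)
 *	}
 */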

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
                const struct dma_map_ops *ops)
{
        if (likely(!ops))
                return true;
#ifdef CONFIG_DMA_OPS_BYPASS
        if (dev->dma_ops_bypass)
                return min_not_zero(mask, dev->bus_dma_limit) >=
                        dma_direct_get_required_mask(dev);
#endif
        return false;
}

/*
 * Check if a device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
                const struct dma_map_ops *ops)
{
        return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
                const struct dma_map_ops *ops)
{
        return dma_go_direct(dev, *dev->dma_mask, ops);
}
0141
0142 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
0143 size_t offset, size_t size, enum dma_data_direction dir,
0144 unsigned long attrs)
0145 {
0146 const struct dma_map_ops *ops = get_dma_ops(dev);
0147 dma_addr_t addr;
0148
0149 BUG_ON(!valid_dma_direction(dir));
0150
0151 if (WARN_ON_ONCE(!dev->dma_mask))
0152 return DMA_MAPPING_ERROR;
0153
0154 if (dma_map_direct(dev, ops) ||
0155 arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
0156 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
0157 else
0158 addr = ops->map_page(dev, page, offset, size, dir, attrs);
0159 debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
0160
0161 return addr;
0162 }
0163 EXPORT_SYMBOL(dma_map_page_attrs);
0164
0165 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
0166 enum dma_data_direction dir, unsigned long attrs)
0167 {
0168 const struct dma_map_ops *ops = get_dma_ops(dev);
0169
0170 BUG_ON(!valid_dma_direction(dir));
0171 if (dma_map_direct(dev, ops) ||
0172 arch_dma_unmap_page_direct(dev, addr + size))
0173 dma_direct_unmap_page(dev, addr, size, dir, attrs);
0174 else if (ops->unmap_page)
0175 ops->unmap_page(dev, addr, size, dir, attrs);
0176 debug_dma_unmap_page(dev, addr, size, dir);
0177 }
0178 EXPORT_SYMBOL(dma_unmap_page_attrs);
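
/*
 * Example (illustrative sketch): typical streaming use through the
 * dma_map_page() wrapper, which resolves to dma_map_page_attrs() above.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... hand addr to the device and wait for the transfer to finish ...
 *	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
 */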

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return 0;

        if (dma_map_direct(dev, ops) ||
            arch_dma_map_sg_direct(dev, sg, nents))
                ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
        else
                ents = ops->map_sg(dev, sg, nents, dir, attrs);

        if (ents > 0)
                debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
        else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
                              ents != -EIO && ents != -EREMOTEIO))
                return -EIO;

        return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The scatterlist describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success.  Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        int ret;

        ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
        if (ret < 0)
                return 0;
        return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
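
/*
 * Example (illustrative sketch): mapping a scatterlist and walking the
 * DMA-mapped segments.  foo_program_segment() is a hypothetical driver hook.
 *
 *	struct scatterlist *s;
 *	int count, i;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgl, s, count, i)
 *		foo_program_segment(dev, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	(original nents, not count)
 */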

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device.  After success, the
 * ownership of the buffer is transferred to the DMA domain: the caller must
 * use dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move ownership
 * back to the CPU domain before touching the buffer from the CPU.
 *
 * On failure the ownership of the buffer is not changed.
 *
 * Returns 0 on success or a negative error code on error.  The following
 * error codes are returned by the mapping implementations:
 *
 *   -EINVAL	An invalid argument, unaligned access or other error
 *		in usage.  Will not succeed if retried.
 *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
 *		complete the mapping.  Should succeed if retried later.
 *   -EIO	Legacy error code with an unknown meaning, e.g. this is
 *		returned if a lower level call returned DMA_MAPPING_ERROR.
 *   -EREMOTEIO	The DMA device cannot access P2PDMA memory specified
 *		in the sg_table.  Will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        int nents;

        nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
        if (nents < 0)
                return nents;
        sgt->nents = nents;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
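
/*
 * Example (illustrative sketch): the sg_table based interface keeps the
 * original and the mapped number of entries in one object, so iteration and
 * unmapping cannot mix them up.
 *
 *	struct scatterlist *s;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(sgt, s, i)
 *		... use sg_dma_address(s) / sg_dma_len(s) ...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */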

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (dma_map_direct(dev, ops) ||
            arch_dma_unmap_sg_direct(dev, sg, nents))
                dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
        else if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr = DMA_MAPPING_ERROR;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return DMA_MAPPING_ERROR;

        if (dma_map_direct(dev, ops))
                addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
        else if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
        return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (!dma_map_direct(dev, ops) && ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
        else if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_single_for_device(dev, addr, size, dir);
        else if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
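
/*
 * Example (illustrative sketch): ownership handover for a long-lived
 * streaming mapping.  The CPU may only touch the data between the for_cpu
 * and for_device calls.
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	... CPU reads and parses the received data ...
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	... device may write to the buffer again ...
 */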

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * Fill in an sg_table describing memory previously allocated with
 * dma_alloc_attrs(), so that it can be handed to interfaces that operate on
 * sg_tables.  Returns -ENXIO when the DMA ops in use do not support this.
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
                                size, attrs);
        if (!ops->get_sgtable)
                return -ENXIO;
        return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
        if (dev_is_dma_coherent(dev))
                return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
        if (attrs & DMA_ATTR_WRITE_COMBINE)
                return pgprot_writecombine(prot);
#endif
        return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_can_mmap(dev);
        return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: device the allocation was made for
 * @vma: vm_area_struct describing the requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of the memory originally requested in dma_alloc_attrs
 * @attrs: attributes of the mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
                                attrs);
        if (!ops->mmap)
                return -ENXIO;
        return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
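
/*
 * Example (illustrative sketch): exposing a coherent buffer to user space
 * from a character device's mmap handler.  The "foo" structure and its
 * fields are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_addr, foo->size);
 *	}
 */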

u64 dma_get_required_mask(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_get_required_mask(dev);
        if (ops->get_required_mask)
                return ops->get_required_mask(dev);

        /*
         * We require every DMA ops implementation to at least support a
         * 32-bit DMA mask (and use bounce buffering if that isn't supported
         * in hardware).  As the direct mapping code has its own routine to
         * report an optimal mask, defaulting to 32-bit here is the right
         * thing for most IOMMUs, and at least not actively harmful in
         * general.
         */
        return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        WARN_ON_ONCE(!dev->coherent_dma_mask);

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        /* let the implementation decide on the zone to allocate from: */
        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        if (dma_alloc_direct(dev, ops))
                cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        else if (ops->alloc)
                cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        else
                return NULL;

        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
        return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
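
/*
 * Example (illustrative sketch): the common dma_alloc_coherent() wrapper
 * ends up here with attrs == 0.
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... give dma to the device, use cpu from the kernel ...
 *	dma_free_coherent(dev, SZ_64K, cpu, dma);
 */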

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
         * On non-coherent platforms which implement DMA-coherent buffers via
         * non-cacheable remaps, ops->free() may call vunmap().  vunmap()
         * walks the page tables and purges TLB entries and therefore must
         * not be called with interrupts disabled, so warn if we get here
         * that way.
         */
        WARN_ON(irqs_disabled());

        if (!cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        if (dma_alloc_direct(dev, ops))
                dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
        else if (ops->free)
                ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (WARN_ON_ONCE(!dev->coherent_dma_mask))
                return NULL;
        if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
                return NULL;

        size = PAGE_ALIGN(size);
        if (dma_alloc_direct(dev, ops))
                return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
        if (!ops->alloc_pages)
                return NULL;
        return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

        if (page)
                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
        return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        size = PAGE_ALIGN(size);
        if (dma_alloc_direct(dev, ops))
                dma_direct_free_pages(dev, size, page, dma_handle, dir);
        else if (ops->free_pages)
                ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, dma_handle, size, dir);
        __dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct page *page)
{
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start,
                        page_to_pfn(page) + vma->vm_pgoff,
                        vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);
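
/*
 * Example (illustrative sketch): non-coherent page allocation.  The CPU
 * accesses the pages through page_address() and must bracket each handover
 * with the dma_sync_single_* calls.
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_BIDIRECTIONAL,
 *			       GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	memset(page_address(page), 0, SZ_64K);
 *	dma_sync_single_for_device(dev, dma, SZ_64K, DMA_BIDIRECTIONAL);
 *	... device works on the buffer via dma ...
 *	dma_sync_single_for_cpu(dev, dma, SZ_64K, DMA_BIDIRECTIONAL);
 *	dma_free_pages(dev, SZ_64K, page, dma, DMA_BIDIRECTIONAL);
 */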

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
                enum dma_data_direction dir, gfp_t gfp)
{
        struct sg_table *sgt;
        struct page *page;

        sgt = kmalloc(sizeof(*sgt), gfp);
        if (!sgt)
                return NULL;
        if (sg_alloc_table(sgt, 1, gfp))
                goto out_free_sgt;
        page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
        if (!page)
                goto out_free_table;
        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        sg_dma_len(sgt->sgl) = sgt->sgl->length;
        return sgt;
out_free_table:
        sg_free_table(sgt);
out_free_sgt:
        kfree(sgt);
        return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
                enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        struct sg_table *sgt;

        if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
                return NULL;

        if (ops && ops->alloc_noncontiguous)
                sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
        else
                sgt = alloc_single_sgt(dev, size, dir, gfp);

        if (sgt) {
                sgt->nents = 1;
                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
        }
        return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        __dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
                        dir);
        sg_free_table(sgt);
        kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
        if (ops && ops->free_noncontiguous)
                ops->free_noncontiguous(dev, size, sgt, dir);
        else
                free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        if (ops && ops->alloc_noncontiguous)
                return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
        return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops && ops->alloc_noncontiguous)
                vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct sg_table *sgt)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops && ops->alloc_noncontiguous) {
                unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                if (vma->vm_pgoff >= count ||
                    vma_pages(vma) > count - vma->vm_pgoff)
                        return -ENXIO;
                return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
        }
        return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
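
/*
 * Example (illustrative sketch): allocating a non-contiguous buffer, mapping
 * it into the kernel and handing ownership to the device.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr)
 *		goto out_free;
 *	... fill vaddr from the CPU ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
 *	... let the device process the buffer ...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 * out_free:
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */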

static int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        /*
         * ->dma_supported sets the bypass flag, so we must always call
         * into the method here unless the device is truly direct mapped.
         */
        if (!ops)
                return dma_direct_supported(dev, mask);
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        /* if ops is not set, dma direct will be used which supports P2PDMA */
        if (!ops)
                return true;

        /*
         * Note: dma_ops_bypass is not checked here because P2PDMA should
         * not be used with dma mapping ops that do not have support even
         * if the specific device is bypassing them.
         */
        return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        arch_dma_set_mask(dev, mask);
        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dma_supported(dev, mask))
                return -EIO;

        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
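
/*
 * Example (illustrative sketch): a driver typically sets both masks at probe
 * time through the dma_set_mask_and_coherent() wrapper, which calls the two
 * functions above.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		return dev_err_probe(&pdev->dev, -ENODEV,
 *				     "no suitable DMA available\n");
 */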

size_t dma_max_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (dma_map_direct(dev, ops))
                size = dma_direct_max_mapping_size(dev);
        else if (ops && ops->max_mapping_size)
                size = ops->max_mapping_size(dev);

        return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (ops && ops->opt_mapping_size)
                size = ops->opt_mapping_size();

        return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_map_direct(dev, ops))
                return dma_direct_need_sync(dev, dma_addr);
        return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);
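
/*
 * Example (illustrative sketch): fast paths that recycle mappings (e.g. page
 * pools) can cache the answer once per mapping and skip the sync calls when
 * they are known to be no-ops.  The "pool" structure is hypothetical.
 *
 *	pool->dma_need_sync = dma_need_sync(dev, dma_addr);
 *	...
 *	if (pool->dma_need_sync)
 *		dma_sync_single_for_device(dev, dma_addr, len,
 *					   DMA_FROM_DEVICE);
 */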

unsigned long dma_get_merge_boundary(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops || !ops->get_merge_boundary)
                return 0;

        return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);