/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute are defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: Tells the DMA-mapping subsystem to suppress allocation
 * failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Indicates that the buffer is fully accessible at an
 * elevated privilege level (and ideally inaccessible or at least read-only
 * at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.  It
 * can be given to a device to use as a DMA source or target.  It is specific
 * to a given device and there may be a translation between the CPU physical
 * address space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed.  It should
 * not be mixed with actual bus addresses.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
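
/*
 * Illustrative use of the streaming mapping helpers above (hypothetical
 * driver code, not part of this header): map a CPU buffer for device
 * writes, hand ownership back and forth with the sync helpers, and unmap
 * when done.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...				// device DMAs into the buffer
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	...				// CPU may now read buf
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	...				// device may DMA again
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */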

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
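
/*
 * Illustrative sg_table mapping (hypothetical driver code): map a
 * scatter-gather table built elsewhere, let the device work on it, and
 * unmap it again with the sgtable helpers above.
 *
 *	struct sg_table sgt;		// filled in by e.g. sg_alloc_table()
 *
 *	if (dma_map_sgtable(dev, &sgt, DMA_TO_DEVICE, 0))
 *		return -EIO;
 *	...				// device reads from the buffer
 *	dma_unmap_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
 */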

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
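
/*
 * Illustrative coherent allocation (hypothetical driver code): the buffer
 * stays mapped for the lifetime of the allocation, so no sync calls are
 * needed; keep both the CPU pointer and the DMA handle for the matching
 * dma_free_coherent().
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...				// program ring_dma into the device
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */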

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
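
/*
 * Typical probe-time use (hypothetical driver code): try the widest mask
 * the device supports and fall back to 32 bits if the platform cannot
 * address that much.
 *
 *	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (rc)
 *		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (rc)
 *		return rc;
 */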

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev:	device to query the boundary for
 * @page_shift:	ilog() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * IOMMUs with a page size smaller than the CPU page size.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
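
/*
 * Illustrative use of the unmap-state helpers above (hypothetical driver
 * code): the DEFINE_* macros only consume storage when
 * CONFIG_NEED_DMA_MAP_STATE is set, so a driver can always record the
 * mapping and later unmap with the stored values.
 *
 *	struct ring_entry {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(entry, addr, mapping);
 *	dma_unmap_len_set(entry, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(entry, addr),
 *			 dma_unmap_len(entry, len), DMA_TO_DEVICE);
 */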

#endif /* _LINUX_DMA_MAPPING_H */