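/*
 * Generic memory allocation helpers for ALSA DMA buffers.
 * Each buffer type (SNDRV_DMA_TYPE_*) is handled by its own set of
 * struct snd_malloc_ops callbacks, dispatched via snd_dma_get_ops().
 * (Reconstructed header comment; the original file header was lost.)
 */
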
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *do_alloc_fallback_pages(struct device *dev, size_t size,
                                     dma_addr_t *addr, bool wc);
static void do_free_fallback_pages(void *p, size_t size, bool wc);
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif
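
/*
 * For SNDRV_DMA_TYPE_CONTINUOUS and SNDRV_DMA_TYPE_VMALLOC buffers the
 * "device" field carries the GFP flags instead of a real device pointer;
 * decode them here, or fall back to the given default flags.
 */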
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
                                          gfp_t default_gfp)
{
        if (!dmab->dev.dev)
                return default_gfp;
        else
                return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (WARN_ON_ONCE(!ops || !ops->alloc))
                return NULL;
        return ops->alloc(dmab, size);
}
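
/**
 * snd_dma_alloc_dir_pages - allocate a DMA buffer of the given type and direction
 * @type: the DMA buffer type (SNDRV_DMA_TYPE_*)
 * @device: the device pointer
 * @dir: the DMA transfer direction
 * @size: the buffer size to allocate (rounded up to the page size)
 * @dmab: the buffer allocation record to fill
 *
 * Calls the allocator of the corresponding buffer type.
 *
 * Return: zero if the buffer was allocated successfully, or a negative
 * error code otherwise.
 */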
int snd_dma_alloc_dir_pages(int type, struct device *device,
                            enum dma_data_direction dir, size_t size,
                            struct snd_dma_buffer *dmab)
{
        if (WARN_ON(!size))
                return -ENXIO;
        if (WARN_ON(!dmab))
                return -ENXIO;

        size = PAGE_ALIGN(size);
        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->dev.dir = dir;
        dmab->bytes = 0;
        dmab->addr = 0;
        dmab->private_data = NULL;
        dmab->area = __snd_dma_alloc_pages(dmab, size);
        if (!dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
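
/**
 * snd_dma_alloc_pages_fallback - allocate a DMA buffer, shrinking the size on failure
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: the buffer allocation record to fill
 *
 * Like snd_dma_alloc_pages(), but when the allocation fails with -ENOMEM,
 * the requested size is reduced and the allocation is retried until it
 * succeeds or the size drops to a single page.  The size actually
 * allocated is stored in dmab->bytes.
 *
 * Return: zero on success, or a negative error code otherwise.
 */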
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                if (err != -ENOMEM)
                        return err;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
                size >>= 1;
                size = PAGE_SIZE << get_order(size);
        }
        if (!dmab->area)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
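
/**
 * snd_dma_free_pages - release the allocated DMA buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases a buffer previously allocated via snd_dma_alloc_dir_pages()
 * and its variants.
 */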
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->free)
                ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres when the device-managed buffer is released */
static void __snd_release_pages(struct device *dev, void *res)
{
        snd_dma_free_pages(res);
}
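
/**
 * snd_devm_alloc_dir_pages - allocate a DMA buffer managed via devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: the DMA transfer direction
 * @size: the buffer size to allocate
 *
 * Allocates the buffer like snd_dma_alloc_dir_pages() and registers it with
 * devres, so it is released automatically when the device is unbound.
 * Since a real device pointer is mandatory here, the device-less
 * SNDRV_DMA_TYPE_CONTINUOUS and SNDRV_DMA_TYPE_VMALLOC types are rejected.
 *
 * A minimal usage sketch (the buffer type, direction and size below are
 * illustrative placeholders, not values prescribed by this API):
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_BIDIRECTIONAL, 4096);
 *	if (!buf)
 *		return -ENOMEM;
 *	(buf->area is the CPU address, buf->addr the DMA address)
 *
 * Return: the allocated buffer object on success, or NULL on failure.
 */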
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
                         enum dma_data_direction dir, size_t size)
{
        struct snd_dma_buffer *dmab;
        int err;

        if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
                    type == SNDRV_DMA_TYPE_VMALLOC))
                return NULL;

        dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
        if (!dmab)
                return NULL;

        err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
        if (err < 0) {
                devres_free(dmab);
                return NULL;
        }

        devres_add(dev, dmab);
        return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
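
/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: the buffer allocation record
 * @area: the VM area to map
 *
 * Return: zero if successful, or a negative error code.
 */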
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
                        struct vm_area_struct *area)
{
        const struct snd_malloc_ops *ops;

        if (!dmab)
                return -ENOENT;
        ops = snd_dma_get_ops(dmab);
        if (ops && ops->mmap)
                return ops->mmap(dmab, area);
        else
                return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

#ifdef CONFIG_HAS_DMA
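/**
 * snd_dma_buffer_sync - sync the DMA buffer between CPU and device
 * @dmab: the buffer allocation record
 * @mode: the sync direction, SNDRV_DMA_SYNC_CPU or SNDRV_DMA_SYNC_DEVICE
 *
 * This is a no-op unless the buffer type requires explicit syncing,
 * i.e. dmab->dev.need_sync was set by the allocator.
 */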
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
                         enum snd_dma_sync_mode mode)
{
        const struct snd_malloc_ops *ops;

        if (!dmab || !dmab->dev.need_sync)
                return;
        ops = snd_dma_get_ops(dmab);
        if (ops && ops->sync)
                ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif
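
/**
 * snd_sgbuf_get_addr - return the DMA address at the given buffer offset
 * @dmab: the buffer allocation record
 * @offset: the byte offset in the buffer
 *
 * Return: the DMA address corresponding to the offset.
 */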
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_addr)
                return ops->get_addr(dmab, offset);
        else
                return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);
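
/**
 * snd_sgbuf_get_page - return the page at the given buffer offset
 * @dmab: the buffer allocation record
 * @offset: the byte offset in the buffer
 *
 * Return: the page pointer corresponding to the offset.
 */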
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_page)
                return ops->get_page(dmab, offset);
        else
                return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);
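
/**
 * snd_sgbuf_get_chunk_size - compute the largest contiguous chunk at an offset
 * @dmab: the buffer allocation record
 * @ofs: the byte offset in the buffer
 * @size: the requested chunk size
 *
 * Return: the number of bytes starting at @ofs that are contiguous,
 * clamped to @size.
 */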
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
                                      unsigned int ofs, unsigned int size)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_chunk_size)
                return ops->get_chunk_size(dmab, ofs, size);
        else
                return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
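
/*
 * Continuous pages allocator
 */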
static void *do_alloc_pages(size_t size, dma_addr_t *addr, gfp_t gfp)
{
        void *p = alloc_pages_exact(size, gfp);

        if (p)
                *addr = page_to_phys(virt_to_page(p));
        return p;
}

static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return do_alloc_pages(size, &dmab->addr,
                              snd_mem_get_gfp_flags(dmab, GFP_KERNEL));
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
        free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
                                   struct vm_area_struct *area)
{
        return remap_pfn_range(area, area->vm_start,
                               dmab->addr >> PAGE_SHIFT,
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
        .alloc = snd_dma_continuous_alloc,
        .free = snd_dma_continuous_free,
        .mmap = snd_dma_continuous_mmap,
};
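
/*
 * VMALLOC allocator
 */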
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

        return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
        vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
                                struct vm_area_struct *area)
{
        return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
        page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
                                           size_t offset)
{
        return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
                                             size_t offset)
{
        return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
                               unsigned int ofs, unsigned int size)
{
        unsigned int start, end;
        unsigned long addr;

        start = ALIGN_DOWN(ofs, PAGE_SIZE);
        end = ofs + size - 1;

        addr = get_vmalloc_page_addr(dmab, start);
        for (;;) {
                start += PAGE_SIZE;
                if (start > end)
                        break;
                addr += PAGE_SIZE;
                if (get_vmalloc_page_addr(dmab, start) != addr)
                        return start - ofs;
        }

        return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
        .alloc = snd_dma_vmalloc_alloc,
        .free = snd_dma_vmalloc_free,
        .mmap = snd_dma_vmalloc_mmap,
        .get_addr = snd_dma_vmalloc_get_addr,
        .get_page = snd_dma_vmalloc_get_page,
        .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
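/*
 * IRAM allocator (on-chip memory obtained via an "iram" gen_pool)
 */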
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct device *dev = dmab->dev.dev;
        struct gen_pool *pool;
        void *p;

        if (dev->of_node) {
                pool = of_gen_pool_get(dev->of_node, "iram", 0);
                /* keep the pool in private_data for the free callback */
                dmab->private_data = pool;

                p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
                if (p)
                        return p;
        }

        /*
         * The internal memory might be too small or already exhausted;
         * fall back to the standard device page allocation.
         */
        dmab->dev.type = SNDRV_DMA_TYPE_DEV;
        return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
        struct gen_pool *pool = dmab->private_data;

        if (pool && dmab->area)
                gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
                             struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return remap_pfn_range(area, area->vm_start,
                               dmab->addr >> PAGE_SHIFT,
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
        .alloc = snd_dma_iram_alloc,
        .free = snd_dma_iram_free,
        .mmap = snd_dma_iram_mmap,
};
#endif

#define DEFAULT_GFP \
        (GFP_KERNEL | \
         __GFP_COMP | \
         __GFP_NORETRY | \
         __GFP_NOWARN)
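
/*
 * Coherent device pages allocator (dma_alloc_coherent)
 */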
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
        dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
                            struct vm_area_struct *area)
{
        return dma_mmap_coherent(dmab->dev.dev, area,
                                 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
        .alloc = snd_dma_dev_alloc,
        .free = snd_dma_dev_free,
        .mmap = snd_dma_dev_mmap,
};
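
/*
 * Write-combined pages
 * With CONFIG_SND_DMA_SGBUF (x86) the pages are allocated manually and
 * remapped with set_memory_wc(); otherwise the dma_alloc_wc() helpers
 * are used.
 */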
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return do_alloc_fallback_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
        do_free_fallback_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
                           struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
        dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
                           struct vm_area_struct *area)
{
        return dma_mmap_wc(dmab->dev.dev, area,
                           dmab->area, dmab->addr, dmab->bytes);
}
#endif

static const struct snd_malloc_ops snd_dma_wc_ops = {
        .alloc = snd_dma_wc_alloc,
        .free = snd_dma_wc_free,
        .mmap = snd_dma_wc_mmap,
};
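
/*
 * Non-contiguous pages allocator (dma_alloc_noncontiguous)
 */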
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct sg_table *sgt;
        void *p;

        sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
                                      DEFAULT_GFP, 0);
        if (!sgt) {
#ifdef CONFIG_SND_DMA_SGBUF
                if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
                        dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
                else
                        dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
                return snd_dma_sg_fallback_alloc(dmab, size);
#else
                return NULL;
#endif
        }

        dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
                                            sg_dma_address(sgt->sgl));
        p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
        if (p) {
                dmab->private_data = sgt;
                /* store the first page address for convenience */
                dmab->addr = snd_sgbuf_get_addr(dmab, 0);
        } else {
                dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
        }
        return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
        dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
        dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
                               dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
                                  struct vm_area_struct *area)
{
        return dma_mmap_noncontiguous(dmab->dev.dev, area,
                                      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
                                   enum snd_dma_sync_mode mode)
{
        if (mode == SNDRV_DMA_SYNC_CPU) {
                if (dmab->dev.dir == DMA_TO_DEVICE)
                        return;
                invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
                dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
                                         dmab->dev.dir);
        } else {
                if (dmab->dev.dir == DMA_FROM_DEVICE)
                        return;
                flush_kernel_vmap_range(dmab->area, dmab->bytes);
                dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
                                            dmab->dev.dir);
        }
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
                                              struct sg_page_iter *piter,
                                              size_t offset)
{
        struct sg_table *sgt = dmab->private_data;

        __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
                             offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
                                             size_t offset)
{
        struct sg_dma_page_iter iter;

        snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
        __sg_page_iter_dma_next(&iter);
        return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
                                               size_t offset)
{
        struct sg_page_iter iter;

        snd_dma_noncontig_iter_set(dmab, &iter, offset);
        __sg_page_iter_next(&iter);
        return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
                                 unsigned int ofs, unsigned int size)
{
        struct sg_dma_page_iter iter;
        unsigned int start, end;
        unsigned long addr;

        start = ALIGN_DOWN(ofs, PAGE_SIZE);
        end = ofs + size - 1;
        snd_dma_noncontig_iter_set(dmab, &iter.base, start);
        if (!__sg_page_iter_dma_next(&iter))
                return 0;

        addr = sg_page_iter_dma_address(&iter);
        for (;;) {
                start += PAGE_SIZE;
                if (start > end)
                        break;
                addr += PAGE_SIZE;
                if (!__sg_page_iter_dma_next(&iter) ||
                    sg_page_iter_dma_address(&iter) != addr)
                        return start - ofs;
        }

        return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
        .alloc = snd_dma_noncontig_alloc,
        .free = snd_dma_noncontig_free,
        .mmap = snd_dma_noncontig_mmap,
        .sync = snd_dma_noncontig_sync,
        .get_addr = snd_dma_noncontig_get_addr,
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
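
/* x86-specific SG-buffer handling with write-combined pages */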
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        void *p = snd_dma_noncontig_alloc(dmab, size);
        struct sg_table *sgt = dmab->private_data;
        struct sg_page_iter iter;

        if (!p)
                return NULL;
        if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
                return p;
        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wc(sg_wc_address(&iter), 1);
        return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
        struct sg_table *sgt = dmab->private_data;
        struct sg_page_iter iter;

        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wb(sg_wc_address(&iter), 1);
        snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
                              struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return dma_mmap_noncontiguous(dmab->dev.dev, area,
                                      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
        .alloc = snd_dma_sg_wc_alloc,
        .free = snd_dma_sg_wc_free,
        .mmap = snd_dma_sg_wc_mmap,
        .sync = snd_dma_noncontig_sync,
        .get_addr = snd_dma_noncontig_get_addr,
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
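
/*
 * Manual page allocation for the fallback cases: retry in the lower DMA
 * zones (DMA32, then DMA) until the pages fit into the device's coherent
 * DMA mask.
 */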
static void *do_alloc_fallback_pages(struct device *dev, size_t size,
                                     dma_addr_t *addr, bool wc)
{
        gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
        void *p;

 again:
        p = do_alloc_pages(size, addr, gfp);
        if (!p || (*addr + size - 1) & ~dev->coherent_dma_mask) {
                if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
                        gfp |= GFP_DMA32;
                        /* drop the out-of-mask pages before retrying */
                        if (p)
                                free_pages_exact(p, size);
                        goto again;
                }
                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        if (p)
                                free_pages_exact(p, size);
                        goto again;
                }
        }
        if (p && wc)
                set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
        return p;
}

static void do_free_fallback_pages(void *p, size_t size, bool wc)
{
        if (wc)
                set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
        free_pages_exact(p, size);
}
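
/*
 * Fallback SG-buffer: an array of single pages allocated manually and
 * vmap()ed into a contiguous virtual area.
 */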
struct snd_dma_sg_fallback {
        size_t count;
        struct page **pages;
        dma_addr_t *addrs;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
                                       struct snd_dma_sg_fallback *sgbuf)
{
        bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
        size_t i;

        for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
                do_free_fallback_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
        kvfree(sgbuf->pages);
        kvfree(sgbuf->addrs);
        kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct snd_dma_sg_fallback *sgbuf;
        struct page **pages;
        size_t i, count;
        void *p;
        bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

        sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
        if (!sgbuf)
                return NULL;
        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                goto error;
        sgbuf->pages = pages;
        sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
        if (!sgbuf->addrs)
                goto error;

        for (i = 0; i < count; sgbuf->count++, i++) {
                p = do_alloc_fallback_pages(dmab->dev.dev, PAGE_SIZE,
                                            &sgbuf->addrs[i], wc);
                if (!p)
                        goto error;
                sgbuf->pages[i] = virt_to_page(p);
        }

        p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
        if (!p)
                goto error;
        dmab->private_data = sgbuf;
        /*
         * store the first page address for convenience; dmab->area isn't
         * assigned yet at this point, so take it from the addrs array
         * instead of going through snd_sgbuf_get_addr()
         */
        dmab->addr = sgbuf->addrs[0];
        return p;

 error:
        __snd_dma_sg_fallback_free(dmab, sgbuf);
        return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
        vunmap(dmab->area);
        __snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
                                    struct vm_area_struct *area)
{
        struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
                area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
        .alloc = snd_dma_sg_fallback_alloc,
        .free = snd_dma_sg_fallback_free,
        .mmap = snd_dma_sg_fallback_mmap,
        /* the area is vmap()ed, so the vmalloc helpers work here, too */
        .get_addr = snd_dma_vmalloc_get_addr,
        .get_page = snd_dma_vmalloc_get_page,
        .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif
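
/*
 * Non-coherent pages allocator (dma_alloc_noncoherent)
 */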
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        void *p;

        p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
                                  dmab->dev.dir, DEFAULT_GFP);
        if (p)
                dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
        return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
        dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
                             dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
                                    struct vm_area_struct *area)
{
        area->vm_page_prot = vm_get_page_prot(area->vm_flags);
        return dma_mmap_pages(dmab->dev.dev, area,
                              area->vm_end - area->vm_start,
                              virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
                                     enum snd_dma_sync_mode mode)
{
        if (mode == SNDRV_DMA_SYNC_CPU) {
                if (dmab->dev.dir != DMA_TO_DEVICE)
                        dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
                                                dmab->bytes, dmab->dev.dir);
        } else {
                if (dmab->dev.dir != DMA_FROM_DEVICE)
                        dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
                                                   dmab->bytes, dmab->dev.dir);
        }
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
        .alloc = snd_dma_noncoherent_alloc,
        .free = snd_dma_noncoherent_free,
        .mmap = snd_dma_noncoherent_mmap,
        .sync = snd_dma_noncoherent_sync,
};

#endif
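
/*
 * Entry points: the allocator ops table indexed by the buffer type
 */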
static const struct snd_malloc_ops *dma_ops[] = {
        [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
        [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
        [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
        [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
        [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
        [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
        [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
        [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif
#ifdef CONFIG_SND_DMA_SGBUF
        [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
        [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
        if (WARN_ON_ONCE(!dmab))
                return NULL;
        if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
                         dmab->dev.type >= ARRAY_SIZE(dma_ops)))
                return NULL;
        return dma_ops[dmab->dev.type];
}