/*
 * Dynamic DMA mapping support: software IO TLB ("swiotlb") bounce
 * buffering for devices that cannot directly address all of the
 * memory used for DMA.
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

struct io_tlb_slot {
	phys_addr_t orig_addr;
	size_t alloc_size;
	unsigned int list;
};

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

struct io_tlb_mem io_tlb_default_mem;

phys_addr_t swiotlb_unencrypted_base;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;

/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used:	The number of used IO TLB slots in this area.
 * @index:	The slot index to start searching from in the next round.
 * @lock:	The lock protecting the above fields in the map and
 *		unmap calls.
 */
struct io_tlb_area {
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
};

/*
 * Round the number of slabs up to the next power of 2.  The last area
 * would be smaller than the rest if default_nslabs were not a power of
 * two.  The number of slots in an area must also be a multiple of
 * IO_TLB_SEGSIZE, otherwise a segment could span two or more areas,
 * which would conflict with free contiguous slot tracking: free slots
 * are treated as contiguous regardless of area boundaries.
 *
 * Return true if default_nslabs was rounded up.
 */
static bool round_up_default_nslabs(void)
{
	if (!default_nareas)
		return false;

	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
	else if (is_power_of_2(default_nslabs))
		return false;
	default_nslabs = roundup_pow_of_two(default_nslabs);
	return true;
}

static void swiotlb_adjust_nareas(unsigned int nareas)
{
	/* use a single area when none is specified */
	if (!nareas)
		nareas = 1;
	else if (!is_power_of_2(nareas))
		nareas = roundup_pow_of_two(nareas);

	default_nareas = nareas;

	pr_info("area num %d.\n", nareas);
	if (round_up_default_nslabs())
		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
			(default_nslabs << IO_TLB_SHIFT) >> 20);
}

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid a tail segment smaller than IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (isdigit(*str))
		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force_bounce = true;
	else if (!strcmp(str, "noforce"))
		swiotlb_force_disable = true;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
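
/*
 * Illustrative example (assuming IO_TLB_SHIFT == 11, i.e. 2 KiB slabs):
 * booting with "swiotlb=65536,4,force" reserves 65536 slabs (128 MiB),
 * splits them into 4 areas and forces bouncing even for devices that
 * could address the memory directly, while "swiotlb=noforce" disables
 * the bounce buffer entirely via swiotlb_force_disable.
 */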

unsigned int swiotlb_max_segment(void)
{
	if (!io_tlb_default_mem.nslabs)
		return 0;
	return rounddown(io_tlb_default_mem.nslabs << IO_TLB_SHIFT, PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * Honour the architecture's request only if the buffer size was not
	 * already overridden on the command line: once default_nslabs no
	 * longer matches the compile-time default, keep the user's setting
	 * and ignore this adjustment.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;

	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	if (round_up_default_nslabs())
		size = default_nslabs << IO_TLB_SHIFT;
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
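
/*
 * Worked example (assuming IO_TLB_SIZE == 2 KiB and IO_TLB_SEGSIZE == 128):
 * nr_slots(3000) rounds the request up to two slots, and io_tlb_offset()
 * gives a slot's position within its 128-slot segment, e.g.
 * io_tlb_offset(130) == 2.
 */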

/*
 * Remap swiotlb memory in the unencrypted physical address space when
 * swiotlb_unencrypted_base is set (e.g. for Hyper-V AMD SEV-SNP
 * Isolation VMs).
 */
#ifdef CONFIG_HAS_IOMEM
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	void *vaddr = NULL;

	if (swiotlb_unencrypted_base) {
		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
		if (!vaddr)
			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
			       &paddr, bytes);
	}

	return vaddr;
}
#else
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	return NULL;
}
#endif

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture
 * to call SWIOTLB when the operations are possible.  It needs to be
 * called before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

	mem->vaddr = swiotlb_mem_remap(mem, bytes);
	if (!mem->vaddr)
		mem->vaddr = vaddr;
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
				    unsigned long nslabs, unsigned int flags,
				    bool late_alloc, unsigned int nareas)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->late_alloc = late_alloc;
	mem->nareas = nareas;
	mem->area_nslabs = nslabs / mem->nareas;

	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

	for (i = 0; i < mem->nareas; i++) {
		spin_lock_init(&mem->areas[i].lock);
		mem->areas[i].index = 0;
		mem->areas[i].used = 0;
	}

	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
	 * be remapped and cleared in swiotlb_update_mem_attributes.
	 */
	if (swiotlb_unencrypted_base)
		return;

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
	return;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs;
	size_t alloc_size;
	size_t bytes;
	void *tlb;

	if (!addressing_limit && !swiotlb_force_bounce)
		return;
	if (swiotlb_force_disable)
		return;

	/*
	 * default_nslabs may change when the number of areas is adjusted,
	 * so only size the bounce buffer after adjusting the area count.
	 */
	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	nslabs = default_nslabs;
	/*
	 * By default allocate the bounce buffer memory from low memory, but
	 * allow it to be placed anywhere for hypervisors with guest memory
	 * encryption.
	 */
retry:
	bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
	if (flags & SWIOTLB_ANY)
		tlb = memblock_alloc(bytes, PAGE_SIZE);
	else
		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
	if (!tlb) {
		pr_warn("%s: failed to allocate tlb structure\n", __func__);
		return;
	}

	if (remap && remap(tlb, nslabs) < 0) {
		memblock_free(tlb, PAGE_ALIGN(bytes));

		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		if (nslabs < IO_TLB_MIN_SLABS)
			panic("%s: Failed to remap %zu bytes\n",
			      __func__, bytes);
		goto retry;
	}

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
		default_nareas), SMP_CACHE_BYTES);
	if (!mem->areas)
		panic("%s: Failed to allocate mem->areas.\n", __func__);

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
				default_nareas);

	if (flags & SWIOTLB_VERBOSE)
		swiotlb_print_info();
}

void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
	swiotlb_init_remap(addressing_limit, flags, NULL);
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned char *vstart = NULL;
	unsigned int order, area_order;
	bool retried = false;
	int rc = 0;

	if (swiotlb_force_disable)
		return 0;

retry:
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
		nslabs = SLABS_PER_PAGE << order;
		retried = true;
	}

	if (!vstart)
		return -ENOMEM;

	if (remap)
		rc = remap(vstart, nslabs);
	if (rc) {
		free_pages((unsigned long)vstart, order);

		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		if (nslabs < IO_TLB_MIN_SLABS)
			return rc;
		retried = true;
		goto retry;
	}

	if (retried) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
	}

	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	area_order = get_order(array_size(sizeof(*mem->areas),
		default_nareas));
	mem->areas = (struct io_tlb_area *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
	if (!mem->areas)
		goto error_area;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		goto error_slots;

	set_memory_decrypted((unsigned long)vstart,
			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
				default_nareas);

	swiotlb_print_info();
	return 0;

error_slots:
	free_pages((unsigned long)mem->areas, area_order);
error_area:
	free_pages((unsigned long)vstart, order);
	return -ENOMEM;
}

void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;
	unsigned int area_order;

	if (swiotlb_force_bounce)
		return;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		area_order = get_order(array_size(sizeof(*mem->areas),
			mem->nareas));
		free_pages((unsigned long)mem->areas, area_order);
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(__pa(mem->areas),
				   array_size(sizeof(*mem->areas), mem->nareas));
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}

/*
 * Return the offset into an IO TLB slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
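
/*
 * Worked example (assuming IO_TLB_SIZE == 2 KiB): for a device with
 * dma_get_min_align_mask() == 0xfff (a 4 KiB alignment granule), an original
 * address of 0x12345 yields 0x12345 & 0xfff & 0x7ff == 0x345, so the bounce
 * buffer preserves the low address bits the device cares about.
 */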

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->area_nslabs)
		return 0;
	return index;
}

/*
 * Find a suitable number of contiguous IO TLB entries for this request and
 * allocate a buffer from the given IO TLB area.
 */
static int swiotlb_do_find_slots(struct device *dev, int area_index,
		phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_area *area = mem->areas + area_index;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;
	unsigned int slot_base;
	unsigned int slot_index;

	BUG_ON(!nslots);
	BUG_ON(area_index >= mem->nareas);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we found an aligned one.  For allocations of
	 * PAGE_SIZE or larger only look for page aligned allocations.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);

	spin_lock_irqsave(&area->lock, flags);
	if (unlikely(nslots > mem->area_nslabs - area->used))
		goto not_found;

	slot_base = area_index * mem->area_nslabs;
	index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));

	do {
		slot_index = slot_base + index;

		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, slot_index) &
		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
			index = wrap_area_index(mem, index + 1);
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * onwards and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(slot_index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[slot_index].list >= nslots)
				goto found;
		}
		index = wrap_area_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&area->lock, flags);
	return -1;

found:
	for (i = slot_index; i < slot_index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size = alloc_size - (offset +
				((i - slot_index) << IO_TLB_SHIFT));
	}
	for (i = slot_index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < mem->area_nslabs)
		area->index = index + nslots;
	else
		area->index = 0;
	area->used += nslots;
	spin_unlock_irqrestore(&area->lock, flags);
	return slot_index;
}

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int start = raw_smp_processor_id() & (mem->nareas - 1);
	int i = start, index;

	do {
		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
					      alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= mem->nareas)
			i = 0;
	} while (i != start);

	return -1;
}
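
/*
 * The search starts in the area picked by the current CPU (mem->nareas is a
 * power of two, so the mask above is a cheap modulo) and then falls back to a
 * round-robin scan of the remaining areas, which keeps CPUs from contending
 * on a single area lock in the common case.
 */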

static unsigned long mem_used(struct io_tlb_mem *mem)
{
	int i;
	unsigned long used = 0;

	for (i = 0; i < mem->nareas; i++)
		used += mem->areas[i].used;
	return used;
}

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_align_mask, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem || !mem->nslabs)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr,
				   alloc_size + offset, alloc_align_mask);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
				"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				alloc_size, mem->nslabs, mem_used(mem));
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	/*
	 * When dir == DMA_FROM_DEVICE we could omit the copy from the original
	 * buffer to the TLB buffer, if we knew for sure the device will
	 * overwrite the entire current content.  But we don't, so the
	 * unconditional bounce may prevent leaking swiotlb content (i.e.
	 * kernel memory) to user-space.
	 */
	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int aindex = index / mem->area_nslabs;
	struct io_tlb_area *area = &mem->areas[aindex];
	int count, i;

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	BUG_ON(aindex >= mem->nareas);

	spin_lock_irqsave(&area->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * applicable
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	area->used -= nslots;
	spin_unlock_irqrestore(&area->lock, flags);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
		size_t mapping_size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}
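
/*
 * Illustrative call path (a typical-usage sketch, not a definitive contract):
 * drivers do not call swiotlb_map() directly but go through the generic DMA
 * API, and dma-direct bounces through swiotlb only when the buffer is not
 * addressable by the device:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hand "dma" to the hardware ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */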

size_t swiotlb_max_mapping_size(struct device *dev)
{
	int min_align_mask = dma_get_min_align_mask(dev);
	int min_align = 0;

	/*
	 * swiotlb_find_slots() skips slots according to
	 * min align mask. This affects max mapping size.
	 * Take it into account here.
	 */
	if (min_align_mask)
		min_align = roundup(min_align_mask, IO_TLB_SIZE);

	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}
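
/*
 * Worked example (assuming IO_TLB_SIZE == 2 KiB and IO_TLB_SEGSIZE == 128):
 * a device without a min align mask can map up to 256 KiB in one go; with
 * dma_get_min_align_mask() == 0xfff the limit drops by the rounded-up mask,
 * i.e. to 256 KiB - 4 KiB = 252 KiB.
 */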

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

static int io_tlb_used_get(void *data, u64 *val)
{
	*val = mem_used(&io_tlb_default_mem);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
					 const char *dirname)
{
	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
	if (!mem->nslabs)
		return;

	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
			&fops_io_tlb_used);
}
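
/*
 * With debugfs mounted at the usual /sys/kernel/debug, the default pool shows
 * up as /sys/kernel/debug/swiotlb/io_tlb_nslabs (total slots) and
 * /sys/kernel/debug/swiotlb/io_tlb_used (slots currently allocated);
 * restricted pools created below appear in per-pool subdirectories named
 * after their reserved-memory node.
 */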

static int __init __maybe_unused swiotlb_create_default_debugfs(void)
{
	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
	return 0;
}

#ifdef CONFIG_DEBUG_FS
late_initcall(swiotlb_create_default_debugfs);
#endif

#ifdef CONFIG_DMA_RESTRICTED_POOL

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size, 0);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/* Restricted pools use a single per-device IO TLB area */
	unsigned int nareas = 1;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		mem->areas = kcalloc(nareas, sizeof(*mem->areas),
				GFP_KERNEL);
		if (!mem->areas) {
			kfree(mem->slots);
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
					false, nareas);
		mem->for_alloc = true;

		rmem->priv = mem;

		swiotlb_create_debugfs_files(mem, rmem->name);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif