/*
 * Contiguous Memory Allocator
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
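
/*
 * Worked example (illustrative, not part of the original source): with
 * order_per_bit = 0 and align_order = 4, the mask above is 0xf, so the
 * bitmap search in cma_alloc() only accepts slots whose bit index is a
 * multiple of 16, i.e. 16-page-aligned ranges.
 */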

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
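
/*
 * Worked example (illustrative, not part of the original source): with
 * order_per_bit = 0, base_pfn = 0x12345 and align_order = 4, the offset
 * is 0x5 bits. Passing it to the bitmap search keeps allocations aligned
 * in absolute physical pages even though the area itself does not start
 * on an align_order boundary.
 */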

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
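
/*
 * Worked example (illustrative, not part of the original source): with
 * order_per_bit = 2 each bitmap bit covers four pages, so a request for
 * 5 pages rounds up to ALIGN(5, 4) >> 2 = 2 bits, i.e. 8 pages of
 * bitmap coverage.
 */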

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in
	 * the same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes),
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
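
/*
 * Illustrative caller sketch (an assumption, not part of this file): the
 * device-tree "reserved-memory" handler in kernel/dma/contiguous.c hands
 * an already memblock-reserved range to cma_init_reserved_mem() roughly
 * like this:
 *
 *	static int __init rmem_cma_setup(struct reserved_mem *rmem)
 *	{
 *		struct cma *cma;
 *		int err;
 *
 *		err = cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *					    rmem->name, &cma);
 *		if (err)
 *			return err;
 *		rmem->priv = cma;
 *		return 0;
 *	}
 */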

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area optional, use 0 for any
 * @size: Size of the reserved area (in bytes),
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows to create custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
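
/*
 * Illustrative caller sketch (an assumption, not part of this file):
 * early arch code typically reserves an area through the
 * cma_declare_contiguous() wrapper from <linux/cma.h>, which forwards to
 * cma_declare_contiguous_nid() with NUMA_NO_NODE:
 *
 *	struct cma *dflt_cma;
 *
 *	if (cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *				   "default", &dflt_cma))
 *		pr_warn("default CMA reservation failed\n");
 */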

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
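
/*
 * Example of the debug line above (illustrative): a fragmented area
 * prints something like
 *	"cma: number of available pages: 16@0+24@128=> 40 free of 1024 total pages"
 * where each term is <free pages>@<bitmap bit offset>.
 */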

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
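
/*
 * Illustrative usage sketch (an assumption, not part of this file): a
 * driver that obtained a CMA region, e.g. via dev_get_cma_area(), pairs
 * the two calls like so:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(cma, nr_pages, 0, false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(cma, page, nr_pages);
 */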

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
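
/*
 * Illustrative callback sketch (an assumption, not part of this file):
 * cma_for_each_area() stops at the first non-zero return, so a walker
 * that only reports each area returns 0 unconditionally:
 *
 *	static int print_one_cma(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	cma_for_each_area(print_one_cma, NULL);
 */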