// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework.
 *
 * Glue between the DMA mapping framework and the core CMA allocator:
 * contiguous memory areas are reserved early at boot (from the kernel
 * command line, Kconfig defaults or the device tree) and physically
 * contiguous pages are handed out from them for DMA allocations.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;
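
/*
 * The default global CMA area size can be defined in the kernel's .config,
 * which is useful mainly for distro maintainers who want a kernel that
 * works reasonably on most supported systems. The size can be given in
 * bytes (CMA_SIZE_MBYTES) or as a percentage of total memory
 * (CMA_SIZE_PERCENTAGE).
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter instead.
 */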
static const phys_addr_t size_bytes __initconst =
		(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

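/*
 * Parse the "cma=<size>[@<base>[-<limit>]]" early parameter. When only a
 * base is given, the area is reserved at exactly that address.
 */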
static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);

#ifdef CONFIG_DMA_PERNUMA_CMA

static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

static int __init early_cma_pernuma(char *p)
{
	pernuma_size_bytes = memparse(p, &p);
	return 0;
}
early_param("cma_pernuma", early_cma_pernuma);
#endif

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

#ifdef CONFIG_DMA_PERNUMA_CMA
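/*
 * Create one CMA area per online NUMA node, each sized by the
 * "cma_pernuma=" early parameter. Nodes where the reservation fails are
 * skipped; allocations for them fall back to the default global area.
 */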
void __init dma_pernuma_cma_reserve(void)
{
	int nid;

	if (!pernuma_size_bytes)
		return;

	for_each_online_node(nid) {
		int ret;
		char name[CMA_MAX_NAME];
		struct cma **cma = &dma_contiguous_pernuma_area[nid];

		snprintf(name, sizeof(name), "pernuma%d", nid);
		ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
						 0, false, name, cma, nid);
		if (ret) {
			pr_warn("%s: reservation failed: err %d, node %d\n",
				__func__, ret, nid);
			continue;
		}

		pr_debug("%s: reserved %llu MiB on node %d\n", __func__,
			 (unsigned long long)pernuma_size_bytes / SZ_1M, nid);
	}
}
#endif
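
/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */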
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}

void __weak
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}
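
/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: If true, reserve the area at exactly @base; if false, reserve
 *         anywhere in the range from @base to @limit.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows creating custom reserved areas for specific devices.
 */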
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
				     "reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
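
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper.
 */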
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}
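
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */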
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}

static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
{
	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);

	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
}
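
/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:  Pointer to device for which the allocation is performed.
 * @size: Requested allocation size.
 * @gfp:  Allocation flags.
 *
 * Tries the device specific contiguous memory area first if available,
 * then the per-NUMA-node area for the device's node, and finally falls
 * back to the default global area.
 *
 * Single-page allocations bypass the per-NUMA and global areas, since the
 * addresses within one page are always contiguous; this avoids wasting
 * CMA pages and helps reduce fragmentation.
 */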
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_PERNUMA_CMA
	int nid = dev_to_node(dev);
#endif

	/* CMA can be used only in a context which permits sleeping. */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);
	if (size <= PAGE_SIZE)
		return NULL;

#ifdef CONFIG_DMA_PERNUMA_CMA
	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
		struct cma *cma = dma_contiguous_pernuma_area[nid];
		struct page *page;

		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}
	}
#endif
	if (!dma_contiguous_default_area)
		return NULL;

	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
}
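
/**
 * dma_free_contiguous() - release allocated pages
 * @dev:  Pointer to device for which the pages were allocated.
 * @page: Pointer to the allocated pages.
 * @size: Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). As
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area and true otherwise, this function falls back to
 * __free_pages() on a false return.
 */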
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* If dev has its own CMA area, try to free the page from there. */
	if (dev->cma_area) {
		if (cma_release(dev->cma_area, page, count))
			return;
	} else {
		/*
		 * Otherwise the page came from either a per-NUMA CMA area
		 * or the default global one.
		 */
#ifdef CONFIG_DMA_PERNUMA_CMA
		if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
				page, count))
			return;
#endif
		if (cma_release(dma_contiguous_default_area, page, count))
			return;
	}

	/* Not in any CMA area: the page came from the buddy allocator. */
	__free_pages(page, get_order(size));
}
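
/*
 * Support for reserved memory regions defined in device tree.
 */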
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev->cma_area = rmem->priv;
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->cma_area = NULL;
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init = rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

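/*
 * Set up a CMA area from a "shared-dma-pool" reserved-memory node. The
 * node must be "reusable" and must not be "no-map"; a node marked
 * "linux,cma-default" becomes the default CMA area unless the cma=
 * command line parameter overrides it.
 */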
static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_default_area = cma;

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
#endif