// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

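/* Handle the "coherent_pool=<size>" kernel command line option. */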
static int __init early_coherent_pool(char *p)
{
    atomic_pool_size = memparse(p, &p);
    return 0;
}
early_param("coherent_pool", early_coherent_pool);

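/* Expose the current size of each pool under <debugfs>/dma_pools/. */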
static void __init dma_atomic_pool_debugfs_init(void)
{
    struct dentry *root;

    root = debugfs_create_dir("dma_pools", NULL);
    debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
    debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
    debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

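/* Account the bytes just added to the pool matching the gfp zone flags. */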
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
    if (gfp & __GFP_DMA)
        pool_size_dma += size;
    else if (gfp & __GFP_DMA32)
        pool_size_dma32 += size;
    else
        pool_size_kernel += size;
}

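/*
 * Check whether pages from the default CMA area would satisfy the zone
 * restriction in @gfp: the whole area must end below the zone's limit.
 */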
static bool cma_in_zone(gfp_t gfp)
{
    unsigned long size;
    phys_addr_t end;
    struct cma *cma;

    cma = dev_get_cma_area(NULL);
    if (!cma)
        return false;

    size = cma_get_size(cma);
    if (!size)
        return false;

    /* CMA can't cross zone boundaries, see cma_activate_area() */
    end = cma_get_base(cma) + size - 1;
    if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
        return end <= DMA_BIT_MASK(zone_dma_bits);
    if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
        return end <= DMA_BIT_MASK(32);
    return true;
}

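/*
 * Grow @pool by up to @pool_size bytes: allocate a contiguous chunk (from
 * CMA when it fits the zone), prepare it for coherent DMA (including an
 * uncached remap with CONFIG_DMA_DIRECT_REMAP), mark it unencrypted and
 * hand it to the genpool.  On failure, retry with a smaller order.
 */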
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
                  gfp_t gfp)
{
    unsigned int order;
    struct page *page = NULL;
    void *addr;
    int ret = -ENOMEM;

    /* Cannot allocate larger than MAX_ORDER-1 */
    order = min(get_order(pool_size), MAX_ORDER-1);

    do {
        pool_size = 1 << (PAGE_SHIFT + order);
        if (cma_in_zone(gfp))
            page = dma_alloc_from_contiguous(NULL, 1 << order,
                             order, false);
        if (!page)
            page = alloc_pages(gfp, order);
    } while (!page && order-- > 0);
    if (!page)
        goto out;

    arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
    addr = dma_common_contiguous_remap(page, pool_size,
                       pgprot_dmacoherent(PAGE_KERNEL),
                       __builtin_return_address(0));
    if (!addr)
        goto free_page;
#else
    addr = page_to_virt(page);
#endif
    /*
     * Memory in the atomic DMA pools must be unencrypted, the pools do not
     * shrink so no re-encryption occurs in dma_direct_free().
     */
    ret = set_memory_decrypted((unsigned long)page_to_virt(page),
                   1 << order);
    if (ret)
        goto remove_mapping;
    ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
                pool_size, NUMA_NO_NODE);
    if (ret)
        goto encrypt_mapping;

    dma_atomic_pool_size_add(gfp, pool_size);
    return 0;

encrypt_mapping:
    ret = set_memory_encrypted((unsigned long)page_to_virt(page),
                   1 << order);
    if (WARN_ON_ONCE(ret)) {
        /* Decrypt succeeded but encrypt failed, purposely leak */
        goto out;
    }
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
    dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
    __free_pages(page, order);
out:
    return ret;
}

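/* Top up @pool once its available space has dropped below the target size. */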
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
    if (pool && gen_pool_avail(pool) < atomic_pool_size)
        atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

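/*
 * Deferred work to replenish the pools; runs in process context where
 * blocking page allocations are allowed.
 */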
static void atomic_pool_work_fn(struct work_struct *work)
{
    if (IS_ENABLED(CONFIG_ZONE_DMA))
        atomic_pool_resize(atomic_pool_dma,
                   GFP_KERNEL | GFP_DMA);
    if (IS_ENABLED(CONFIG_ZONE_DMA32))
        atomic_pool_resize(atomic_pool_dma32,
                   GFP_KERNEL | GFP_DMA32);
    atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

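/*
 * Create a genpool with page granularity and order-aligned first-fit
 * allocation, then seed it with an initial chunk.
 */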
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
                              gfp_t gfp)
{
    struct gen_pool *pool;
    int ret;

    pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
    if (!pool)
        return NULL;

    gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

    ret = atomic_pool_expand(pool, pool_size, gfp);
    if (ret) {
        gen_pool_destroy(pool);
        pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
               pool_size >> 10, &gfp);
        return NULL;
    }

    pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
        gen_pool_size(pool) >> 10, &gfp);
    return pool;
}

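/*
 * Boot-time setup: pick a default pool size if "coherent_pool" was not
 * given, then create the GFP_KERNEL pool plus a pool for each DMA zone
 * that is in use.
 */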
static int __init dma_atomic_pool_init(void)
{
    int ret = 0;

    /*
     * If coherent_pool was not used on the command line, default the pool
     * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
     */
    if (!atomic_pool_size) {
        unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
        pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
        atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
    }
    INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

    atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
                            GFP_KERNEL);
    if (!atomic_pool_kernel)
        ret = -ENOMEM;
    if (has_managed_dma()) {
        atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
                        GFP_KERNEL | GFP_DMA);
        if (!atomic_pool_dma)
            ret = -ENOMEM;
    }
    if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
        atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
                        GFP_KERNEL | GFP_DMA32);
        if (!atomic_pool_dma32)
            ret = -ENOMEM;
    }

    dma_atomic_pool_debugfs_init();
    return ret;
}
postcore_initcall(dma_atomic_pool_init);

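/*
 * Return the next pool to try after @prev (NULL picks the first match for
 * @gfp).  The walk falls back towards pools backed by lower zones, since
 * memory that fits a narrower zone also satisfies any wider address mask.
 */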
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
    if (prev == NULL) {
        if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
            return atomic_pool_dma32;
        if (atomic_pool_dma && (gfp & GFP_DMA))
            return atomic_pool_dma;
        return atomic_pool_kernel;
    }
    if (prev == atomic_pool_kernel)
        return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
    if (prev == atomic_pool_dma32)
        return atomic_pool_dma;
    return NULL;
}

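/*
 * Carve @size bytes out of @pool, let the caller veto the physical address
 * via @phys_addr_ok, schedule a background refill if the pool is running
 * low, and return the zeroed buffer's page.
 */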
static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
        struct gen_pool *pool, void **cpu_addr,
        bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
    unsigned long addr;
    phys_addr_t phys;

    addr = gen_pool_alloc(pool, size);
    if (!addr)
        return NULL;

    phys = gen_pool_virt_to_phys(pool, addr);
    if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
        gen_pool_free(pool, addr, size);
        return NULL;
    }

    if (gen_pool_avail(pool) < atomic_pool_size)
        schedule_work(&atomic_pool_work);

    *cpu_addr = (void *)addr;
    memset(*cpu_addr, 0, size);
    return pfn_to_page(__phys_to_pfn(phys));
}

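/*
 * Allocate from the first pool, in fallback order, that yields memory the
 * device can address; warn if none does.
 */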
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
        void **cpu_addr, gfp_t gfp,
        bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
    struct gen_pool *pool = NULL;
    struct page *page;

    while ((pool = dma_guess_pool(pool, gfp))) {
        page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
                         phys_addr_ok);
        if (page)
            return page;
    }

    WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
    return NULL;
}

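/*
 * Return a buffer to the pool it was allocated from.  Returns false if
 * @start did not come from any atomic pool, so the caller can free it by
 * other means.
 */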
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
    struct gen_pool *pool = NULL;

    while ((pool = dma_guess_pool(pool, 0))) {
        if (!gen_pool_has_addr(pool, (unsigned long)start, size))
            continue;
        gen_pool_free(pool, (unsigned long)start, size);
        return true;
    }

    return false;
}