/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */

#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

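/*
 * Each allocation made through the "alloc" file is recorded with one of
 * these so it can be found again and returned through the "free" file.
 */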
struct cma_mem {
	struct hlist_node node;
	struct page *p;
	unsigned long n;
};

static struct dentry *cma_debugfs_root;

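/* Generic getter: expose an unsigned long field of struct cma as a u64. */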
static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

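/*
 * "used": the number of pages currently allocated from this area,
 * derived from the set bits in the allocation bitmap.
 */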
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	mutex_lock(&cma->lock);
	/* the bitmap size fits in an int, so the narrowing cast is safe */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);
	/* each bitmap bit covers 1 << order_per_bit pages */
	*val = (u64)used << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

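/*
 * "maxchunk": the longest contiguous run of free bits in the bitmap,
 * reported in pages (shifted by order_per_bit).
 */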
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	for (;;) {
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		/*
		 * Compare against the bitmap length, not cma->count: when
		 * order_per_bit > 0 the bitmap is shorter than the page
		 * count, and checking cma->count could loop forever on a
		 * fully allocated area.
		 */
		if (start >= bitmap_maxno)
			break;
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	mutex_unlock(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

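/*
 * Debugfs-triggered allocations are kept on cma->mem_head so that a later
 * write to the "free" file can find and release them.
 */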
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}

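/*
 * Release up to @count pages from the recorded allocations. A block may be
 * split and partially released only when order_per_bit == 0; otherwise a
 * partial release would not align with the allocation bitmap granularity.
 */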
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}

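/* Handler for writes to the "free" file; the value is a count of pages. */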
static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

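/*
 * Allocate @count pages from the area and record them so they can be
 * released later through the "free" file.
 */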
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

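/* Handler for writes to the "alloc" file; the value is a count of pages. */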
static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

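/*
 * Create one debugfs directory (cma-<idx>) per CMA area, holding the
 * alloc/free trigger files and read-only views of the area's state.
 */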
static void cma_debugfs_add_one(struct cma *cma, int idx)
{
	struct dentry *tmp;
	char name[16];
	int u32s;

	sprintf(name, "cma-%d", idx);

	tmp = debugfs_create_dir(name, cma_debugfs_root);

	debugfs_create_file("alloc", S_IWUSR, tmp, cma, &cma_alloc_fops);

	debugfs_create_file("free", S_IWUSR, tmp, cma, &cma_free_fops);

	debugfs_create_file("base_pfn", S_IRUGO, tmp,
			    &cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", S_IRUGO, tmp,
			    &cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
			    &cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);

	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32 *)cma->bitmap, u32s);
}

static int __init cma_debugfs_init(void)
{
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);
	if (!cma_debugfs_root)
		return -ENOMEM;

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], i);

	return 0;
}
late_initcall(cma_debugfs_init);
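
/*
 * Example usage from the shell (a sketch: paths assume debugfs is mounted
 * at /sys/kernel/debug and that at least one CMA area exists as cma-0):
 *
 *   echo 64 > /sys/kernel/debug/cma/cma-0/alloc    # allocate 64 pages
 *   cat /sys/kernel/debug/cma/cma-0/used           # -> 64
 *   cat /sys/kernel/debug/cma/cma-0/maxchunk       # largest free run, in pages
 *   echo 64 > /sys/kernel/debug/cma/cma-0/free     # release them again
 */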