// SPDX-License-Identifier: GPL-2.0
/*
 * CMA DebugFS Interface
 */

#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

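/*
 * Each allocation made through the "alloc" debugfs file is tracked by one
 * of these entries so that it can later be returned via the "free" file.
 */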
struct cma_mem {
	struct hlist_node node;
	struct page *p;
	unsigned long n;
};

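/* Generic getter: report an unsigned long field of struct cma as a u64. */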
static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

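/*
 * Report the number of pages currently allocated from this CMA area,
 * derived from the number of set bits in the allocation bitmap.
 */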
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	spin_lock_irq(&cma->lock);
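	/* The number of bitmap bits fits in an int, so the cast is safe. */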
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	spin_unlock_irq(&cma->lock);
	*val = (u64)used << cma->order_per_bit;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

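/*
 * Report the largest contiguous run of free pages in the area, found by
 * walking the bitmap for the longest stretch of clear bits.
 */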
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	for (;;) {
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= bitmap_maxno)
			break;
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	spin_unlock_irq(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

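/* Push a tracked allocation onto this area's list of debugfs allocations. */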
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

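/* Pop one tracked allocation off the list, or return NULL if it is empty. */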
static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}

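/*
 * Release up to @count pages previously allocated through debugfs. Entries
 * are freed whole when possible; a partial release is only attempted when
 * order_per_bit is 0, since cma_release() works at bitmap-chunk granularity.
 */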
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}

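/* Write handler for the "free" file: release @val pages back to the area. */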
static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

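/*
 * Allocate @count pages from the area and remember them on the list so
 * they can be released later through the "free" file.
 */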
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0, false);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

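/* Write handler for the "alloc" file: allocate @val pages from the area. */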
static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

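/*
 * Create the per-area debugfs directory (cma-<name>) with its control and
 * status files, plus a raw view of the allocation bitmap.
 */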
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
	struct dentry *tmp;
	char name[CMA_MAX_NAME];

	scnprintf(name, sizeof(name), "cma-%s", cma->name);

	tmp = debugfs_create_dir(name, root_dentry);

	debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
	debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
	debugfs_create_file("base_pfn", 0444, tmp,
			    &cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", 0444, tmp,
			    &cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);

	cma->dfs_bitmap.array = (u32 *)cma->bitmap;
	cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma),
						  BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap);
}

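/* Create the debugfs root and one subdirectory per registered CMA area. */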
static int __init cma_debugfs_init(void)
{
	struct dentry *cma_debugfs_root;
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], cma_debugfs_root);

	return 0;
}
late_initcall(cma_debugfs_init);