// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 */

#define pr_fmt(fmt) "memtrace: " fmt

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;
	u64 start;
	u64 size;
	u32 nid;
	struct dentry *dir;
	char name[16];
};

static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;

static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;

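/*
 * read() handler for a per-node "trace" file: copy from the ioremap()ed
 * trace buffer into the user buffer.
 */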
static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct memtrace_entry *ent = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}

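/*
 * mmap() handler for a per-node "trace" file. The requested VMA must fit
 * entirely within the trace buffer, and the mapping is made non-cached
 * because the buffer is used as a cache-inhibited range and is no longer
 * covered by the kernel linear mapping.
 */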
static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct memtrace_entry *ent = filp->private_data;

	if (ent->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

static const struct file_operations memtrace_fops = {
	.llseek = default_llseek,
	.read	= memtrace_read,
	.open	= simple_open,
	.mmap	= memtrace_mmap,
};

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

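/*
 * Zero a freshly allocated range through the linear mapping, then flush it
 * out of the data cache so it can safely be used as a cache-inhibited range.
 */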
static void memtrace_clear_range(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	unsigned long pfn;

	/* As HIGHMEM does not apply, use clear_page() directly. */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
			cond_resched();
		clear_page(__va(PFN_PHYS(pfn)));
	}
	/*
	 * Before we go ahead and use this range as a cache-inhibited range,
	 * flush the cache.
	 */
	flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
				   (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
				   FLUSH_CHUNK_SIZE);
}

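/*
 * Allocate one physically contiguous, size-aligned trace buffer on the given
 * node, zero it, mark its pages PageOffline() and strip it from the kernel
 * linear mapping. Returns the physical address of the buffer, or 0 on failure.
 */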
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
	const unsigned long nr_pages = PHYS_PFN(size);
	unsigned long pfn, start_pfn;
	struct page *page;

	/*
	 * Trace memory needs to be aligned to the size, which is guaranteed
	 * by alloc_contig_pages().
	 */
	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
				  __GFP_NOWARN, nid, NULL);
	if (!page)
		return 0;
	start_pfn = page_to_pfn(page);

	/*
	 * Clear the range while we still have a linear mapping.
	 *
	 * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
	 */
	memtrace_clear_range(start_pfn, nr_pages);

	/*
	 * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
	 * dumping, ...) should be touching these pages.
	 */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__SetPageOffline(pfn_to_page(pfn));

	arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);

	return PFN_PHYS(start_pfn);
}

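/*
 * Carve out one trace buffer of 'size' bytes per online node, recording each
 * successful allocation in memtrace_array. Nodes where the allocation fails
 * are skipped with an error message.
 */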
static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				 sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -EINVAL;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

		memtrace_array[memtrace_array_nr].start = m;
		memtrace_array[memtrace_array_nr].size = size;
		memtrace_array[memtrace_array_nr].nid = nid;
		memtrace_array_nr++;
	}

	return 0;
}

static struct dentry *memtrace_debugfs_dir;

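/*
 * Expose each allocated region under debugfs as <nid>/{trace,start,size}.
 * The buffer has been removed from the linear mapping, so it is ioremap()ed
 * before being handed to the "trace" file.
 */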
static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
			       ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, 16, "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);

		ent->dir = dir;
		debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops);
		debugfs_create_x64("start", 0400, dir, &ent->start);
		debugfs_create_x64("size", 0400, dir, &ent->size);
	}

	return ret;
}

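/*
 * Undo memtrace_alloc_node(): re-create the linear mapping, clear the
 * PageOffline() marker and hand the pages back to the page allocator.
 */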
static int memtrace_free(int nid, u64 start, u64 size)
{
	struct mhp_params params = { .pgprot = PAGE_KERNEL };
	const unsigned long nr_pages = PHYS_PFN(size);
	const unsigned long start_pfn = PHYS_PFN(start);
	unsigned long pfn;
	int ret;

	ret = arch_create_linear_mapping(nid, start, size, &params);
	if (ret)
		return ret;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__ClearPageOffline(pfn_to_page(pfn));

	free_contig_range(start_pfn, nr_pages);
	return 0;
}

/*
 * Iterate through the chunks of memory we allocated and attempt to expose
 * them back to the kernel.
 */
static int memtrace_free_regions(void)
{
	int i, ret = 0;
	struct memtrace_entry *ent;

	for (i = memtrace_array_nr - 1; i >= 0; i--) {
		ent = &memtrace_array[i];

		/* We have freed this chunk previously */
		if (ent->nid == NUMA_NO_NODE)
			continue;

		/* Remove from io mappings */
		if (ent->mem) {
			iounmap(ent->mem);
			ent->mem = 0;
		}

		if (memtrace_free(ent->nid, ent->start, ent->size)) {
			pr_err("Failed to free trace memory on node %d\n",
			       ent->nid);
			ret += 1;
			continue;
		}

		/*
		 * Memory was freed successfully so clean up references to it
		 * so on reentry we can tell that this chunk was freed.
		 */
		debugfs_remove_recursive(ent->dir);
		pr_info("Freed trace memory back on node %d\n", ent->nid);
		ent->size = ent->start = ent->nid = NUMA_NO_NODE;
	}
	if (ret)
		return ret;

	/* If all chunks of memory were freed successfully, reset globals */
	kfree(memtrace_array);
	memtrace_array = NULL;
	memtrace_size = 0;
	memtrace_array_nr = 0;
	return 0;
}

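/*
 * Write handler for the debugfs "enable" file: free any previously allocated
 * trace memory, then (for a non-zero, memory-block-aligned value) allocate
 * that many bytes of trace memory on every online node.
 */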
static int memtrace_enable_set(void *data, u64 val)
{
	int rc = -EAGAIN;
	u64 bytes;

	/*
	 * Don't attempt to do anything if size isn't aligned to a memory
	 * block or equal to zero.
	 */
	bytes = memory_block_size_bytes();
	if (val & (bytes - 1)) {
		pr_err("Value must be aligned with 0x%llx\n", bytes);
		return -EINVAL;
	}

	mutex_lock(&memtrace_mutex);

	/* Free all previously allocated memory. */
	if (memtrace_size && memtrace_free_regions())
		goto out_unlock;

	if (!val) {
		rc = 0;
		goto out_unlock;
	}

	/* Allocate memory. */
	if (memtrace_init_regions_runtime(val))
		goto out_unlock;

	if (memtrace_init_debugfs())
		goto out_unlock;

	memtrace_size = val;
	rc = 0;
out_unlock:
	mutex_unlock(&memtrace_mutex);
	return rc;
}

static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
			memtrace_enable_set, "0x%016llx\n");

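/* Create the debugfs "memtrace" directory and its "enable" control file. */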
static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  arch_debugfs_dir);

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);

	return 0;
}
machine_device_initcall(powernv, memtrace_init);
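/*
 * Usage sketch (an illustration under stated assumptions, not part of this
 * file): with debugfs mounted at /sys/kernel/debug on a powernv system, the
 * control file created above typically appears as
 * /sys/kernel/debug/powerpc/memtrace/enable. Writing a memory-block-aligned
 * size, e.g.
 *
 *	echo 0x40000000 > /sys/kernel/debug/powerpc/memtrace/enable
 *
 * allocates that much trace memory per online node, after which each node's
 * buffer can be read or mmap()ed via <nid>/trace in the same directory.
 * Writing 0 frees the trace memory back to the kernel.
 */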