// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Redefine EM()/EMe() so that the MIGRATE_REASON list from
 * <trace/events/migrate.h> expands to its string names, populating
 * migrate_reason_names[] for debug output.
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

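/*
 * These NULL-terminated tables back the printk() %pGp, %pGg and %pGv
 * format specifiers (see lib/vsprintf.c), which is how the dumpers
 * below decode flag words into symbolic names, e.g.:
 *
 *	pr_warn("flags: %pGv\n", &vma->vm_flags);
 */
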
static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Reading the pageblock migratetype without holding the zone lock:
	 * it may race with isolation, but this is a best-effort dump for
	 * debugging, so a little inaccuracy from the racy lookup is
	 * acceptable.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * The page is corrupt, so page_mapping() cannot be called
		 * safely. Do a safe subset of what page_mapping() does by
		 * hand instead: mask the mapping flag bits out of
		 * page->mapping ourselves.
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid the VM_BUG_ON() in page_mapcount(): slab pages reuse the
	 * page->_mapcount space to encode their own information, so a raw
	 * mapcount is meaningless for them.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
				head, compound_order(head),
				folio_entire_mapcount(folio),
				head_compound_pincount(head));
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);
}

void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
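
/*
 * Example (hypothetical caller): dump the page before a state check
 * fires, so the report carries the page's flags, mapping and refcount:
 *
 *	if (WARN_ON(!PageLocked(page)))
 *		dump_page(page, "expected locked page");
 */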

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
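
/*
 * Example (hypothetical callers): under CONFIG_DEBUG_VM these dumpers
 * are typically reached via the assertions in <linux/mmdebug.h>, which
 * print the offending object before BUG()ing, e.g.:
 *
 *	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
 *	VM_BUG_ON_MM(atomic_read(&mm->mm_users) <= 0, mm);
 */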

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * "vm_debug" with no arguments (or with nothing after the '=')
	 * enables all of the debugging options; "vm_debug=-" disables them
	 * all. Otherwise each option character after the '=' is parsed
	 * individually, and unknown characters are reported and skipped.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
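
/*
 * Boot-time usage, as implied by the parser above:
 *
 *	vm_debug	enable all debug options (the default)
 *	vm_debug=-	disable all debug options
 *	vm_debug=P	enable only page struct init poisoning
 */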

/*
 * Poison freshly allocated struct pages so that use before they are
 * fully initialized is caught (see PagePoisoned() in dump_page() above).
 */
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
#endif		/* CONFIG_DEBUG_VM */