0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/debugfs.h>
0012 #include <linux/fs.h>
0013 #include <linux/mm.h>
0014 #include <linux/seq_file.h>
0015
0016 #include <asm/domain.h>
0017 #include <asm/fixmap.h>
0018 #include <asm/memory.h>
0019 #include <asm/ptdump.h>
0020
/*
 * Fixed virtual-address landmarks printed as "---[ name ]---" section
 * headers while dumping.  Must be sorted by ascending start address and
 * terminated with a -1 sentinel.  NOTE: ptdump_initialize() patches the
 * "vmalloc() Area" entry (index 4 with KASAN, 2 without) at boot because
 * VMALLOC_START is not a compile-time constant here.
 */
static struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
	{ KASAN_SHADOW_START,	"Kasan shadow start"},
	{ KASAN_SHADOW_END,	"Kasan shadow end"},
#endif
	{ MODULES_VADDR,	"Modules" },
	{ PAGE_OFFSET,		"Kernel Mapping" },
	{ 0,			"vmalloc() Area" },	/* start fixed up at init */
	{ VMALLOC_END,		"vmalloc() End" },
	{ FIXADDR_START,	"Fixmap Area" },
	{ VECTORS_BASE,	"Vectors" },
	{ VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
	{ -1,			NULL },			/* terminator */
};
0035
/*
 * Output helpers that tolerate a NULL seq_file: ptdump_check_wx() walks
 * the tables with st->seq == NULL because it only wants the W+X
 * accounting, not the textual dump.
 */
#define pt_dump_seq_printf(m, fmt, args...) \
({				\
	if (m)				\
		seq_printf(m, fmt, ##args);	\
})

/*
 * Use seq_puts() rather than seq_printf() for the no-argument case: the
 * string is emitted verbatim, so a stray '%' in a marker name can never
 * be misinterpreted as a conversion specifier.
 */
#define pt_dump_seq_puts(m, fmt) \
({				\
	if (m)				\
		seq_puts(m, fmt);	\
})
0047
/*
 * Walker state carried through the page-table traversal.  A contiguous
 * run of entries with identical protection/level/domain is coalesced
 * into one output line; the fields below track the run in progress.
 */
struct pg_state {
	struct seq_file *seq;		/* output sink; NULL for W+X-check-only walks */
	const struct addr_marker *marker; /* next address marker to announce */
	unsigned long start_address;	/* first address of the current run */
	unsigned level;			/* pg_level[] index of the current run */
	u64 current_prot;		/* masked protection bits of the current run */
	bool check_wx;			/* true: warn on writable+executable mappings */
	unsigned long wx_pages;		/* running count of W+X pages found */
	const char *current_domain;	/* ARM domain name of the current run (or NULL) */
};
0058
/*
 * Describes one decodable attribute of a descriptor: the entry matches
 * when (prot & mask) == val, in which case 'set' is printed, otherwise
 * 'clear' (a NULL 'clear' prints nothing).
 */
struct prot_bits {
	u64		mask;
	u64		val;
	const char	*set;
	const char	*clear;
	bool		ro_bit;		/* this attribute is the read-only bit */
	bool		nx_bit;		/* this attribute is the no-execute bit */
};
0067
/*
 * Attribute decoding table for PTE-level (4K page) descriptors, using the
 * Linux view of the bits (L_PTE_*).  The L_PTE_MT_MASK entries are
 * mutually exclusive memory-type encodings, hence no 'clear' strings.
 */
static const struct prot_bits pte_bits[] = {
	{
		.mask	= L_PTE_USER,
		.val	= L_PTE_USER,
		.set	= "USR",
		.clear	= " ",
	}, {
		.mask	= L_PTE_RDONLY,
		.val	= L_PTE_RDONLY,
		.set	= "ro",
		.clear	= "RW",
		.ro_bit	= true,
	}, {
		.mask	= L_PTE_XN,
		.val	= L_PTE_XN,
		.set	= "NX",
		.clear	= "x ",
		.nx_bit	= true,
	}, {
		.mask	= L_PTE_SHARED,
		.val	= L_PTE_SHARED,
		.set	= "SHD",
		.clear	= " ",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_UNCACHED,
		.set	= "SO/UNCACHED",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_BUFFERABLE,
		.set	= "MEM/BUFFERABLE/WC",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITETHROUGH,
		.set	= "MEM/CACHED/WT",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITEBACK,
		.set	= "MEM/CACHED/WBRA",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_MINICACHE,
		.set	= "MEM/MINICACHE",
#endif
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITEALLOC,
		.set	= "MEM/CACHED/WBWA",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_SHARED,
		.set	= "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_NONSHARED,
		.set	= "DEV/NONSHARED",
#endif
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_WC,
		.set	= "DEV/WC",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_CACHED,
		.set	= "DEV/CACHED",
	},
};
0137
/*
 * Attribute decoding table for section (PMD-level block) descriptors.
 * The access-permission encoding differs per configuration: LPAE uses
 * AP2/RDONLY bits, ARMv6+ classic uses APX+AP[1:0], and pre-v6 uses
 * AP[1:0] only — hence the three alternative table heads below.
 */
static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
	{
		.mask	= PMD_SECT_USER,
		.val	= PMD_SECT_USER,
		.set	= "USR",
	}, {
		.mask	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.val	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.set	= "ro",
		.clear	= "RW",
		.ro_bit	= true,
#elif __LINUX_ARM_ARCH__ >= 6
	{
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.set	= " ro",
		.ro_bit	= true,
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_WRITE,
		.set	= " RW",
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ,
		.set	= "USR ro",
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set	= "USR RW",
#else /* ARMv4/ARMv5 */
	/* These are approximate */
	{
		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= 0,
		.set	= " ro",
		.ro_bit	= true,
	}, {
		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_WRITE,
		.set	= " RW",
	}, {
		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ,
		.set	= "USR ro",
	}, {
		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set	= "USR RW",
#endif
	}, {
		.mask	= PMD_SECT_XN,
		.val	= PMD_SECT_XN,
		.set	= "NX",
		.clear	= "x ",
		.nx_bit	= true,
	}, {
		.mask	= PMD_SECT_S,
		.val	= PMD_SECT_S,
		.set	= "SHD",
		.clear	= " ",
	},
};
0201
/*
 * Per-level decoding description.  'mask', 'ro_bit' and 'nx_bit' are
 * derived from 'bits' at boot by ptdump_initialize().
 */
struct pg_level {
	const struct prot_bits *bits;	/* decode table, NULL for table levels */
	size_t num;			/* number of entries in 'bits' */
	u64 mask;			/* union of all masks in 'bits' */
	const struct prot_bits *ro_bit;	/* entry flagged ro_bit, for W+X check */
	const struct prot_bits *nx_bit;	/* entry flagged nx_bit, for W+X check */
};
0209
/*
 * Indexed by the 'level' argument passed to note_page(): 1=pgd, 2=p4d,
 * 3=pud, 4=section-mapped pmd, 5=pte.  Only levels 4 and 5 carry
 * decodable protection bits; the rest are table pointers.
 */
static struct pg_level pg_level[] = {
	{
	}, {
	}, {
	}, {
	}, {	/* level 4: section */
		.bits	= section_bits,
		.num	= ARRAY_SIZE(section_bits),
	}, {	/* level 5: pte */
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	},
};
0223
0224 static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
0225 {
0226 unsigned i;
0227
0228 for (i = 0; i < num; i++, bits++) {
0229 const char *s;
0230
0231 if ((st->current_prot & bits->mask) == bits->val)
0232 s = bits->set;
0233 else
0234 s = bits->clear;
0235
0236 if (s)
0237 pt_dump_seq_printf(st->seq, " %s", s);
0238 }
0239 }
0240
0241 static void note_prot_wx(struct pg_state *st, unsigned long addr)
0242 {
0243 if (!st->check_wx)
0244 return;
0245 if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
0246 pg_level[st->level].ro_bit->val)
0247 return;
0248 if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
0249 pg_level[st->level].nx_bit->val)
0250 return;
0251
0252 WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
0253 (void *)st->start_address);
0254
0255 st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
0256 }
0257
/*
 * Core coalescing state machine.  Called for every entry visited; a new
 * output line is emitted only when the protection bits, level, domain or
 * an address-marker boundary changes, so contiguous identical mappings
 * collapse into one range line.  The terminating note_page(st, 0, 0, 0,
 * NULL) call flushes the final pending run.
 */
static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned int level, u64 val, const char *domain)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		/* First call of a walk: open the first marker section. */
		st->level = level;
		st->current_prot = prot;
		st->current_domain = domain;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   domain != st->current_domain ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		if (st->current_prot) {
			/* Audit and print the run that just ended. */
			note_prot_wx(st, addr);
			pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx   ",
					   st->start_address, addr);

			/* Scale the size to the largest whole KMGTPE unit. */
			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
			if (st->current_domain)
				pt_dump_seq_printf(st->seq, " %s",
						   st->current_domain);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
			pt_dump_seq_printf(st->seq, "\n");
		}

		/* Crossed into the next marker region: print its header. */
		if (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
					   st->marker->name);
		}
		/* Start a new run at this entry. */
		st->start_address = addr;
		st->current_prot = prot;
		st->current_domain = domain;
		st->level = level;
	}
}
0305
0306 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
0307 const char *domain)
0308 {
0309 pte_t *pte = pte_offset_kernel(pmd, 0);
0310 unsigned long addr;
0311 unsigned i;
0312
0313 for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
0314 addr = start + i * PAGE_SIZE;
0315 note_page(st, addr, 5, pte_val(*pte), domain);
0316 }
0317 }
0318
/*
 * Translate the pmd's ARM domain field into a printable name.  Domains
 * only exist in the short-descriptor format, so the LPAE build returns
 * NULL (note_page then skips the domain column entirely).
 */
static const char *get_domain_name(pmd_t *pmd)
{
#ifndef CONFIG_ARM_LPAE
	switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
	case PMD_DOMAIN(DOMAIN_KERNEL):
		return "KERNEL ";
	case PMD_DOMAIN(DOMAIN_USER):
		return "USER   ";
	case PMD_DOMAIN(DOMAIN_IO):
		return "IO     ";
	case PMD_DOMAIN(DOMAIN_VECTORS):
		return "VECTORS";
	default:
		return "unknown";
	}
#endif
	return NULL;
}
0337
0338 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
0339 {
0340 pmd_t *pmd = pmd_offset(pud, 0);
0341 unsigned long addr;
0342 unsigned i;
0343 const char *domain;
0344
0345 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
0346 addr = start + i * PMD_SIZE;
0347 domain = get_domain_name(pmd);
0348 if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
0349 note_page(st, addr, 3, pmd_val(*pmd), domain);
0350 else
0351 walk_pte(st, pmd, addr, domain);
0352
0353 if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
0354 addr += SECTION_SIZE;
0355 pmd++;
0356 domain = get_domain_name(pmd);
0357 note_page(st, addr, 4, pmd_val(*pmd), domain);
0358 }
0359 }
0360 }
0361
0362 static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
0363 {
0364 pud_t *pud = pud_offset(p4d, 0);
0365 unsigned long addr;
0366 unsigned i;
0367
0368 for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
0369 addr = start + i * PUD_SIZE;
0370 if (!pud_none(*pud)) {
0371 walk_pmd(st, pud, addr);
0372 } else {
0373 note_page(st, addr, 3, pud_val(*pud), NULL);
0374 }
0375 }
0376 }
0377
0378 static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
0379 {
0380 p4d_t *p4d = p4d_offset(pgd, 0);
0381 unsigned long addr;
0382 unsigned i;
0383
0384 for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
0385 addr = start + i * P4D_SIZE;
0386 if (!p4d_none(*p4d)) {
0387 walk_pud(st, p4d, addr);
0388 } else {
0389 note_page(st, addr, 2, p4d_val(*p4d), NULL);
0390 }
0391 }
0392 }
0393
0394 static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
0395 unsigned long start)
0396 {
0397 pgd_t *pgd = pgd_offset(mm, 0UL);
0398 unsigned i;
0399 unsigned long addr;
0400
0401 for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
0402 addr = start + i * PGDIR_SIZE;
0403 if (!pgd_none(*pgd)) {
0404 walk_p4d(st, pgd, addr);
0405 } else {
0406 note_page(st, addr, 1, pgd_val(*pgd), NULL);
0407 }
0408 }
0409 }
0410
0411 void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
0412 {
0413 struct pg_state st = {
0414 .seq = m,
0415 .marker = info->markers,
0416 .check_wx = false,
0417 };
0418
0419 walk_pgd(&st, info->mm, info->base_addr);
0420 note_page(&st, 0, 0, 0, NULL);
0421 }
0422
/*
 * One-time setup: derive each pg_level's combined mask and its ro/nx
 * entries from its decode table, and patch the runtime-only
 * VMALLOC_START into the "vmalloc() Area" marker (index 4 when the two
 * KASAN markers are compiled in, index 2 otherwise — keep in sync with
 * address_markers[]).
 */
static void __init ptdump_initialize(void)
{
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++) {
				pg_level[i].mask |= pg_level[i].bits[j].mask;
				if (pg_level[i].bits[j].ro_bit)
					pg_level[i].ro_bit = &pg_level[i].bits[j];
				if (pg_level[i].bits[j].nx_bit)
					pg_level[i].nx_bit = &pg_level[i].bits[j];
			}
#ifdef CONFIG_KASAN
	address_markers[4].start_address = VMALLOC_START;
#else
	address_markers[2].start_address = VMALLOC_START;
#endif
}
0442
/* Descriptor for the kernel page tables, walked from address 0. */
static struct ptdump_info kernel_ptdump_info = {
	.mm = &init_mm,
	.markers = address_markers,
	.base_addr = 0,
};
0448
0449 void ptdump_check_wx(void)
0450 {
0451 struct pg_state st = {
0452 .seq = NULL,
0453 .marker = (struct addr_marker[]) {
0454 { 0, NULL},
0455 { -1, NULL},
0456 },
0457 .check_wx = true,
0458 };
0459
0460 walk_pgd(&st, &init_mm, 0);
0461 note_page(&st, 0, 0, 0, NULL);
0462 if (st.wx_pages)
0463 pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
0464 st.wx_pages);
0465 else
0466 pr_info("Checked W+X mappings: passed, no W+X pages found\n");
0467 }
0468
/*
 * Boot-time hook: derive the decode metadata first (the debugfs read
 * path depends on pg_level being filled in), then expose the dump file.
 */
static int __init ptdump_init(void)
{
	ptdump_initialize();
	ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
	return 0;
}
__initcall(ptdump_init);