// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * the generic and software tag-based KASAN modes.
 */
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"
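
/*
 * Exported entry points for explicit checks (kasan_check_read() and
 * kasan_check_write()): validate an access of the given size against shadow
 * memory and return false if the access is invalid.
 */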
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
        return kasan_check_range((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
        return kasan_check_range((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);
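
/*
 * The definitions below replace the architecture's memset()/memmove()/
 * memcpy(): the accessed ranges are checked against shadow memory first, and
 * only then is the uninstrumented __mem*() variant called (or NULL returned
 * if the check fails).
 */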
#undef memset
void *memset(void *addr, int c, size_t len)
{
        if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
                return NULL;

        return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
            !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
                return NULL;

        return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
            !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
                return NULL;

        return __memcpy(dest, src, len);
}
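
/*
 * Poison the shadow for @size bytes starting at @addr with @value. Both the
 * address and the size are expected to be KASAN_GRANULE_SIZE-aligned.
 */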
void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
        void *shadow_start, *shadow_end;

        if (!kasan_arch_is_ready())
                return;

        /*
         * Perform the shadow offset calculation on the untagged address, as
         * some callers pass tagged addresses to this function.
         */
        addr = kasan_reset_tag(addr);

        /* KFENCE objects have no KASAN shadow; skip them. */
        if (is_kfence_address(addr))
                return;

        if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
                return;
        if (WARN_ON(size & KASAN_GRANULE_MASK))
                return;

        shadow_start = kasan_mem_to_shadow(addr);
        shadow_end = kasan_mem_to_shadow(addr + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL(kasan_poison);
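
/*
 * For the generic mode: mark the last granule of an object whose size is not
 * a multiple of KASAN_GRANULE_SIZE, storing the number of accessible bytes in
 * the corresponding shadow byte.
 */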
#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
        if (!kasan_arch_is_ready())
                return;

        if (size & KASAN_GRANULE_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
                *shadow = size & KASAN_GRANULE_MASK;
        }
}
#endif /* CONFIG_KASAN_GENERIC */
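
/*
 * Unpoison the shadow for @size bytes starting at @addr. The address must be
 * granule-aligned; the size may be arbitrary, in which case whole granules
 * are unpoisoned with the pointer's tag and, in the generic mode, the last
 * partial granule records how many of its bytes are accessible.
 */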
void kasan_unpoison(const void *addr, size_t size, bool init)
{
        u8 tag = get_tag(addr);

        /*
         * Perform the shadow offset calculation on the untagged address, as
         * some callers pass tagged addresses to this function.
         */
        addr = kasan_reset_tag(addr);

        /* KFENCE objects have no KASAN shadow; skip them. */
        if (is_kfence_address(addr))
                return;

        if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
                return;

        /* Unpoison all granules that cover the object. */
        kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

        /* Partially poison the last granule for the generic mode. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule(addr, size);
}
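
/*
 * With memory hotplug, shadow has to be allocated when a memory block goes
 * online and freed again when it goes offline (or onlining is cancelled).
 */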
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * A "bad" pud or pmd at this point means a huge mapping rather than
         * a pointer to the next level table, so the shadow is mapped.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
            WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If the shadow for this range is already mapped (e.g. it
                 * was set up at boot), there is nothing to allocate.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * The shadow was either mapped at boot or vmalloc'ed above
                 * when the memory went online. Only in the latter case does
                 * find_vm_area() return a vm_struct, and only then can the
                 * shadow be freed with vfree().
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}
static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif /* CONFIG_MEMORY_HOTPLUG */
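
/*
 * With CONFIG_KASAN_VMALLOC, shadow for the vmalloc address space is
 * populated on demand as areas are mapped and released lazily after they are
 * freed; without it, only module mappings get dedicated shadow (see the
 * #else branch below).
 */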
#ifdef CONFIG_KASAN_VMALLOC

/* Architectures may override this to populate shadow for early vm areas. */
void __init __weak kasan_populate_early_vm_area_shadow(void *start,
                                                       unsigned long size)
{
}

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                      void *unused)
{
        unsigned long page;
        pte_t pte;

        if (likely(!pte_none(*ptep)))
                return 0;

        page = __get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
        pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

        /* Recheck under the lock: another CPU may have installed this PTE. */
        spin_lock(&init_mm.page_table_lock);
        if (likely(pte_none(*ptep))) {
                set_pte_at(&init_mm, addr, ptep, pte);
                page = 0;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (page)
                free_page(page);
        return 0;
}
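
/*
 * Make sure shadow pages exist for the vmalloc region [addr, addr + size).
 * Newly allocated shadow is marked KASAN_VMALLOC_INVALID until the region is
 * explicitly unpoisoned.
 */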
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
        unsigned long shadow_start, shadow_end;
        int ret;

        if (!is_vmalloc_or_module_addr((void *)addr))
                return 0;

        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
        shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

        /*
         * UML maps the entire shadow region up front at boot, so nothing
         * needs to be allocated here; just mark the range as invalid.
         */
        if (IS_ENABLED(CONFIG_UML)) {
                __memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
                return 0;
        }

        shadow_start = PAGE_ALIGN_DOWN(shadow_start);
        shadow_end = PAGE_ALIGN(shadow_end);

        ret = apply_to_page_range(&init_mm, shadow_start,
                                  shadow_end - shadow_start,
                                  kasan_populate_vmalloc_pte, NULL);
        if (ret)
                return ret;

        flush_cache_vmap(shadow_start, shadow_end);
        /*
         * The stores that set up the shadow for this area (the mapping above
         * and any later unpoisoning) must be visible to other CPUs before
         * the pointer to the area is published to them; otherwise a CPU that
         * observes the pointer could still see stale shadow for it. For
         * vmalloc() the required ordering comes from the smp_wmb() in
         * clear_vm_uninitialized_flag(); for get_vm_area() and friends, the
         * page-table lock taken when the caller maps pages into the area
         * provides it.
         */

        return 0;
}
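
/*
 * Clear a shadow PTE and free the backing shadow page. Invoked via
 * apply_to_existing_page_range() when vmalloc shadow is released below.
 */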
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                        void *unused)
{
        unsigned long page;

        page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

        spin_lock(&init_mm.page_table_lock);

        if (likely(!pte_none(*ptep))) {
                pte_clear(&init_mm, addr, ptep);
                free_page(page);
        }
        spin_unlock(&init_mm.page_table_lock);

        return 0;
}
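
/*
 * Release the shadow backing the vmalloc region [start, end), which lies
 * within the free region [free_region_start, free_region_end).
 *
 * A freed region does not necessarily map onto whole shadow pages: a shadow
 * page at either edge may also cover memory that is still in use. Such an
 * edge page can only be freed once the enclosing free region spans everything
 * it covers, which is what the alignment adjustments below check for.
 */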
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end)
{
        void *shadow_start, *shadow_end;
        unsigned long region_start, region_end;
        unsigned long size;

        region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
        region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

        free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

        if (start != region_start &&
            free_region_start < region_start)
                region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

        free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

        if (end != region_end &&
            free_region_end > region_end)
                region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

        shadow_start = kasan_mem_to_shadow((void *)region_start);
        shadow_end = kasan_mem_to_shadow((void *)region_end);

        if (shadow_end > shadow_start) {
                size = shadow_end - shadow_start;
                if (IS_ENABLED(CONFIG_UML)) {
                        __memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
                        return;
                }
                apply_to_existing_page_range(&init_mm,
                                             (unsigned long)shadow_start,
                                             size, kasan_depopulate_vmalloc_pte,
                                             NULL);
                flush_tlb_kernel_range((unsigned long)shadow_start,
                                       (unsigned long)shadow_end);
        }
}
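
/*
 * Mark a vmalloc-ed region as accessible. With the software tag-based mode a
 * random tag is also assigned and returned embedded in the pointer.
 */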
void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
                               kasan_vmalloc_flags_t flags)
{
        if (!is_vmalloc_or_module_addr(start))
                return (void *)start;

        /*
         * With the software tag-based mode, don't assign tags to mappings
         * that are not PROT_NORMAL (e.g. executable memory): the kernel
         * does not tolerate executing from tagged addresses.
         */
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
            !(flags & KASAN_VMALLOC_PROT_NORMAL))
                return (void *)start;

        start = set_tag(start, kasan_random_tag());
        kasan_unpoison(start, size, false);
        return (void *)start;
}
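
/*
 * Poison the shadow of a vmalloc region once it is freed, so that later
 * accesses through stale pointers are reported.
 */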
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
        if (!is_vmalloc_or_module_addr(start))
                return;

        size = round_up(size, KASAN_GRANULE_SIZE);
        kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */
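
/*
 * Without CONFIG_KASAN_VMALLOC, shadow still has to be provided for module
 * mappings: kasan_alloc_module_shadow() vmalloc's a shadow region covering
 * @addr and marks the corresponding vm_struct with VM_KASAN so that the
 * shadow can be freed again in kasan_free_module_shadow().
 */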
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
                                KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        if (IS_ENABLED(CONFIG_UML)) {
                __memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
                return 0;
        }

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                struct vm_struct *vm = find_vm_area(addr);
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                vm->flags |= VM_KASAN;
                kmemleak_ignore(ret);

                if (vm->flags & VM_DEFER_KMEMLEAK)
                        kmemleak_vmalloc(vm, size, gfp_mask);

                return 0;
        }

        return -ENOMEM;
}
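
/* Free the shadow previously allocated by kasan_alloc_module_shadow(). */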
void kasan_free_module_shadow(const struct vm_struct *vm)
{
        if (IS_ENABLED(CONFIG_UML))
                return;

        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

#endif /* CONFIG_KASAN_VMALLOC */