// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code, shared between all KASAN modes.
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}

void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags, true);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif

void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK

void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the portion of the task stack that lies below the given watermark. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Compute the base of the stack that contains the watermark address
	 * by masking with THREAD_SIZE; the stack is assumed to be
	 * THREAD_SIZE aligned.
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is stored in the redzone.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

/*
 * Adaptive redzone policy: larger objects get proportionally larger redzones.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
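
/*
 * Example: a 120-byte object is larger than 128 - 32 but fits within
 * 512 - 64, so optimal_redzone() returns 64 and __kasan_cache_create()
 * below will not let the per-object allocation size drop under
 * 120 + 64 = 184 bytes.
 */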

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN.
	 * Currently this flag is used in two places:
	 * 1. In slab_ksize(), when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c, to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into the redzone when it's not possible to store
	 * it within the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    legally be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small to hold struct kasan_free_meta.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied,
	 * i.e. the free meta is stored within the (freed) object itself.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate the size with the optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE. */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use the optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
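
/*
 * Typical resulting per-object layout in the generic mode, when both
 * metadata structs are placed in the redzone by __kasan_cache_create()
 * and no other slab debugging is enabled:
 *
 *   | object data | kasan_alloc_meta | kasan_free_meta | rest of redzone |
 *
 * A free_meta_offset of 0 means the free meta is stored inside the (freed)
 * object itself; KASAN_NO_FREE_META means no free meta is stored at all.
 */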

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * assign_tag() decides which tag an object gets:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). The tag of such objects
 *    is preserved across reallocations of the same slot.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    legally accessed after being freed. Their tag is preserved as well.
 * For all other caches, a new random tag is generated on each allocation.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor is SLAB_TYPESAFE_BY_RCU,
	 * keep the default tag during slab creation (init == true) and assign
	 * a random tag during allocation (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or are SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB, assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
	/*
	 * For SLUB, assign a random tag during slab creation, otherwise reuse
	 * the tag that was set by kmem_cache_free().
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* The tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS. */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine, bool init)
{
	u8 tag;
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	/*
	 * The object is not poisoned here: that happens later, either in
	 * __kasan_poison_pages() when the backing pages are freed or in
	 * __kasan_slab_free_mempool() for mempool objects.
	 */
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct folio *folio;

	folio = virt_to_folio(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc()- and
	 * kmalloc()-backed mempool allocations, those allocations can still
	 * come straight from the page allocator (and thus not be backed by a
	 * slab) when their size exceeds the maximum slab object size.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
	} else {
		struct slab *slab = folio_slab(folio);

		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
				gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign a random tag for the tag-based modes.
	 * The tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}

static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last granule of the object to cover the
	 * unaligned part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_unpoison_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last granule of the object to cover the
	 * unaligned part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}
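
/*
 * Check that the first byte of the given address is accessible and report a
 * KASAN error otherwise. Used, for example, by ksize() to make sure an object
 * hasn't already been freed before its size is queried.
 */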
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}