// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
    unsigned long entries[KASAN_STACK_DEPTH];
    unsigned int nr_entries;

    nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
    return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}

void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
    track->pid = current->pid;
    track->stack = kasan_save_stack(flags, true);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
    current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
    current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
    kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
    void *base = task_stack_page(task);

    kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
    /*
     * Calculate the task stack base address.  Avoid using 'current'
     * because this function is called by early resume code which hasn't
     * yet set up the percpu register (%gs).
     */
    void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

    kasan_unpoison(base, watermark - base, false);
}
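
/*
 * Illustrative worked example (the THREAD_SIZE value here is an assumption,
 * not taken from this file): with THREAD_SIZE == 0x4000, a watermark of
 * 0xffffc90000403a50 masked with ~(THREAD_SIZE - 1) yields a stack base of
 * 0xffffc90000400000, and the 0x3a50 bytes between the base and the
 * watermark are unpoisoned.
 */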
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
    if (kasan_stack_collection_enabled())
        return SLAB_KASAN;
    return 0;
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
    u8 tag;
    unsigned long i;

    if (unlikely(PageHighMem(page)))
        return;

    tag = kasan_random_tag();
    kasan_unpoison(set_tag(page_address(page), tag),
               PAGE_SIZE << order, init);
    for (i = 0; i < (1 << order); i++)
        page_kasan_tag_set(page + i, tag);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
    if (likely(!PageHighMem(page)))
        kasan_poison(page_address(page), PAGE_SIZE << order,
                 KASAN_PAGE_FREE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
    return
        object_size <= 64        - 16   ? 16 :
        object_size <= 128       - 32   ? 32 :
        object_size <= 512       - 64   ? 64 :
        object_size <= 4096      - 128  ? 128 :
        object_size <= (1 << 14) - 256  ? 256 :
        object_size <= (1 << 15) - 512  ? 512 :
        object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
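
/*
 * Worked examples of the mapping above: an object_size of 32 fits within
 * 64 - 16, so it gets a 16-byte redzone; 100 exceeds 128 - 32 but fits
 * within 512 - 64, so it gets a 64-byte redzone; 4000 exceeds 4096 - 128
 * but fits within (1 << 14) - 256, so it gets a 256-byte redzone.
 */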

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
              slab_flags_t *flags)
{
    unsigned int ok_size;
    unsigned int optimal_size;

    /*
     * SLAB_KASAN is used to mark caches as ones that are sanitized by
     * KASAN. Currently this flag is used in two places:
     * 1. In slab_ksize() when calculating the size of the accessible
     *    memory within the object.
     * 2. In slab_common.c to prevent merging of sanitized caches.
     */
    *flags |= SLAB_KASAN;

    if (!kasan_stack_collection_enabled())
        return;

    ok_size = *size;

    /* Add alloc meta into redzone. */
    cache->kasan_info.alloc_meta_offset = *size;
    *size += sizeof(struct kasan_alloc_meta);

    /*
     * If alloc meta doesn't fit, don't add it.
     * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
     * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
     * larger sizes.
     */
    if (*size > KMALLOC_MAX_SIZE) {
        cache->kasan_info.alloc_meta_offset = 0;
        *size = ok_size;
        /* Continue, since free meta might still fit. */
    }

    /* Only the generic mode uses free meta or flexible redzones. */
    if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
        cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
        return;
    }

    /*
     * Add free meta into redzone when it's not possible to store
     * it in the object. This is the case when:
     * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
     *    be touched after it was freed, or
     * 2. Object has a constructor, which means it's expected to
     *    retain its content until the next allocation, or
     * 3. Object is too small.
     * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
     */
    if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
        cache->object_size < sizeof(struct kasan_free_meta)) {
        ok_size = *size;

        cache->kasan_info.free_meta_offset = *size;
        *size += sizeof(struct kasan_free_meta);

        /* If free meta doesn't fit, don't add it. */
        if (*size > KMALLOC_MAX_SIZE) {
            cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
            *size = ok_size;
        }
    }

    /* Calculate size with optimal redzone. */
    optimal_size = cache->object_size + optimal_redzone(cache->object_size);
    /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
    if (optimal_size > KMALLOC_MAX_SIZE)
        optimal_size = KMALLOC_MAX_SIZE;
    /* Use optimal size if the size with added metas is not large enough. */
    if (*size < optimal_size)
        *size = optimal_size;
}
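
/*
 * Rough sketch of the layout this produces in the generic mode for a cache
 * whose free meta cannot live in the object itself (e.g. one with a
 * constructor):
 *
 *   | object | alloc_meta | free_meta | rest of redzone (up to *size) |
 *            ^ alloc_meta_offset
 *                         ^ free_meta_offset
 *
 * When free meta does fit in the object, free_meta_offset stays 0 and no
 * extra redzone space is reserved for it.
 */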

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
    cache->kasan_info.is_kmalloc = true;
}

size_t __kasan_metadata_size(struct kmem_cache *cache)
{
    if (!kasan_stack_collection_enabled())
        return 0;
    return (cache->kasan_info.alloc_meta_offset ?
        sizeof(struct kasan_alloc_meta) : 0) +
        (cache->kasan_info.free_meta_offset ?
        sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
                          const void *object)
{
    if (!cache->kasan_info.alloc_meta_offset)
        return NULL;
    return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                        const void *object)
{
    BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
    if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
        return NULL;
    return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

void __kasan_poison_slab(struct slab *slab)
{
    struct page *page = slab_page(slab);
    unsigned long i;

    for (i = 0; i < compound_nr(page); i++)
        page_kasan_tag_reset(page + i);
    kasan_poison(page_address(page), page_size(page),
             KASAN_SLAB_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
    kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
    kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
            KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
                    const void *object, bool init)
{
    if (IS_ENABLED(CONFIG_KASAN_GENERIC))
        return 0xff;

    /*
     * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
     * set, assign a tag when the object is being allocated (init == false).
     */
    if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
        return init ? KASAN_TAG_KERNEL : kasan_random_tag();

    /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
    /* For SLAB assign tags based on the object index in the freelist. */
    return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
    /*
     * For SLUB assign a random tag during slab creation, otherwise reuse
     * the already assigned tag.
     */
    return init ? kasan_random_tag() : get_tag(object);
#endif
}
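
/*
 * For example, under the tag-based modes with SLUB: a cache without a
 * constructor and without SLAB_TYPESAFE_BY_RCU gets KASAN_TAG_KERNEL from
 * kasan_init_slab_obj() (init == true) and a fresh random tag from
 * kasan_slab_alloc() (init == false), while a cache with a constructor
 * keeps reusing the random tag assigned when the slab was created.
 */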

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                        const void *object)
{
    struct kasan_alloc_meta *alloc_meta;

    if (kasan_stack_collection_enabled()) {
        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (alloc_meta)
            __memset(alloc_meta, 0, sizeof(*alloc_meta));
    }

    /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
    object = set_tag(object, assign_tag(cache, object, true));

    return (void *)object;
}

static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
                unsigned long ip, bool quarantine, bool init)
{
    u8 tag;
    void *tagged_object;

    if (!kasan_arch_is_ready())
        return false;

    tag = get_tag(object);
    tagged_object = object;
    object = kasan_reset_tag(object);

    if (is_kfence_address(object))
        return false;

    if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
        object)) {
        kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
        return true;
    }

    /* RCU slabs could be legally used after free within the RCU period */
    if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
        return false;

    if (!kasan_byte_accessible(tagged_object)) {
        kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
        return true;
    }

    kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
            KASAN_SLAB_FREE, init);

    if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
        return false;

    if (kasan_stack_collection_enabled())
        kasan_set_free_info(cache, object, tag);

    return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                unsigned long ip, bool init)
{
    return ____kasan_slab_free(cache, object, ip, true, init);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
    if (ptr != page_address(virt_to_head_page(ptr))) {
        kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
        return true;
    }

    if (!kasan_byte_accessible(ptr)) {
        kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
        return true;
    }

    /*
     * The object will be poisoned by kasan_poison_pages() or
     * kasan_slab_free_mempool().
     */

    return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
    ____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
    struct folio *folio;

    folio = virt_to_folio(ptr);

    /*
     * Even though this function is only called for kmem_cache_alloc and
     * kmalloc backed mempool allocations, those allocations can still be
     * !PageSlab() when the size provided to kmalloc is larger than
     * KMALLOC_MAX_CACHE_SIZE, and kmalloc falls back onto page_alloc.
     */
    if (unlikely(!folio_test_slab(folio))) {
        if (____kasan_kfree_large(ptr, ip))
            return;
        kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
    } else {
        struct slab *slab = folio_slab(folio);

        ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
    }
}
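
/*
 * For example, on SLUB a mempool element obtained with kmalloc(64 << 10, ...)
 * is larger than KMALLOC_MAX_CACHE_SIZE (two pages) and is backed by the
 * page allocator, so it takes the !folio_test_slab() branch above and the
 * whole folio is poisoned; slab-backed elements go through
 * ____kasan_slab_free() instead.
 */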

static void set_alloc_info(struct kmem_cache *cache, void *object,
                gfp_t flags, bool is_kmalloc)
{
    struct kasan_alloc_meta *alloc_meta;

    /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
    if (cache->kasan_info.is_kmalloc && !is_kmalloc)
        return;

    alloc_meta = kasan_get_alloc_meta(cache, object);
    if (alloc_meta)
        kasan_set_track(&alloc_meta->alloc_track, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
                    void *object, gfp_t flags, bool init)
{
    u8 tag;
    void *tagged_object;

    if (gfpflags_allow_blocking(flags))
        kasan_quarantine_reduce();

    if (unlikely(object == NULL))
        return NULL;

    if (is_kfence_address(object))
        return (void *)object;

    /*
     * Generate and assign random tag for tag-based modes.
     * Tag is ignored in set_tag() for the generic mode.
     */
    tag = assign_tag(cache, object, false);
    tagged_object = set_tag(object, tag);

    /*
     * Unpoison the whole object.
     * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
     */
    kasan_unpoison(tagged_object, cache->object_size, init);

    /* Save alloc info (if possible) for non-kmalloc() allocations. */
    if (kasan_stack_collection_enabled())
        set_alloc_info(cache, (void *)object, flags, false);

    return tagged_object;
}

static inline void *____kasan_kmalloc(struct kmem_cache *cache,
                const void *object, size_t size, gfp_t flags)
{
    unsigned long redzone_start;
    unsigned long redzone_end;

    if (gfpflags_allow_blocking(flags))
        kasan_quarantine_reduce();

    if (unlikely(object == NULL))
        return NULL;

    if (is_kfence_address(kasan_reset_tag(object)))
        return (void *)object;

    /*
     * The object has already been unpoisoned by kasan_slab_alloc() for
     * kmalloc() or by kasan_krealloc() for krealloc().
     */

    /*
     * The redzone has byte-level precision for the generic mode.
     * Partially poison the last object granule to cover the unaligned
     * part of the redzone.
     */
    if (IS_ENABLED(CONFIG_KASAN_GENERIC))
        kasan_poison_last_granule((void *)object, size);

    /* Poison the aligned part of the redzone. */
    redzone_start = round_up((unsigned long)(object + size),
                KASAN_GRANULE_SIZE);
    redzone_end = round_up((unsigned long)(object + cache->object_size),
                KASAN_GRANULE_SIZE);
    kasan_poison((void *)redzone_start, redzone_end - redzone_start,
               KASAN_SLAB_REDZONE, false);

    /*
     * Save alloc info (if possible) for kmalloc() allocations.
     * This also rewrites the alloc info when called from kasan_krealloc().
     */
    if (kasan_stack_collection_enabled())
        set_alloc_info(cache, (void *)object, flags, true);

    /* Keep the tag that was set by kasan_slab_alloc(). */
    return (void *)object;
}
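
/*
 * Worked example for the generic mode (KASAN_GRANULE_SIZE == 8): for
 * kmalloc(100, ...) served from a cache with object_size == 128,
 * kasan_poison_last_granule() marks bytes 100..103 of the final partial
 * granule inaccessible, redzone_start rounds up to object + 104,
 * redzone_end to object + 128, and those 24 bytes are poisoned as
 * KASAN_SLAB_REDZONE.
 */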

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
                    size_t size, gfp_t flags)
{
    return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
                        gfp_t flags)
{
    unsigned long redzone_start;
    unsigned long redzone_end;

    if (gfpflags_allow_blocking(flags))
        kasan_quarantine_reduce();

    if (unlikely(ptr == NULL))
        return NULL;

    /*
     * The object has already been unpoisoned by kasan_unpoison_pages() for
     * alloc_pages() or by kasan_krealloc() for krealloc().
     */

    /*
     * The redzone has byte-level precision for the generic mode.
     * Partially poison the last object granule to cover the unaligned
     * part of the redzone.
     */
    if (IS_ENABLED(CONFIG_KASAN_GENERIC))
        kasan_poison_last_granule(ptr, size);

    /* Poison the aligned part of the redzone. */
    redzone_start = round_up((unsigned long)(ptr + size),
                KASAN_GRANULE_SIZE);
    redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
    kasan_poison((void *)redzone_start, redzone_end - redzone_start,
             KASAN_PAGE_REDZONE, false);

    return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
    struct slab *slab;

    if (unlikely(object == ZERO_SIZE_PTR))
        return (void *)object;

    /*
     * Unpoison the object's data.
     * Part of it might already have been unpoisoned, but it's unknown
     * how big that part is.
     */
    kasan_unpoison(object, size, false);

    slab = virt_to_slab(object);

    /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
    if (unlikely(!slab))
        return __kasan_kmalloc_large(object, size, flags);
    else
        return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

bool __kasan_check_byte(const void *address, unsigned long ip)
{
    if (!kasan_byte_accessible(address)) {
        kasan_report((unsigned long)address, 1, false, ip);
        return false;
    }
    return true;
}