// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */

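/*
 * Reminder of the generic-mode shadow encoding: each KASAN_GRANULE_SIZE
 * (8-byte) granule of memory maps to one shadow byte at
 *
 *	(addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * A shadow value of 0 means the whole granule is accessible, a value N
 * in 1..7 means only the first N bytes are accessible, and a negative
 * value marks the granule fully poisoned with a type tag such as
 * KASAN_GLOBAL_REDZONE.
 *
 * Worked example: for a 13-byte object the two shadow bytes are 0 and 5.
 * A 1-byte access at offset 13 has offset-in-granule 13 & 7 == 5, and
 * 5 >= shadow value 5, so memory_is_poisoned_1() below reports it.
 */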
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
    s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

    if (unlikely(shadow_value)) {
        s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
        return unlikely(last_accessible_byte >= shadow_value);
    }

    return false;
}

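/*
 * Example: for an access of size 4 starting at offset 6 within a
 * granule, the last byte lands at offset (6 + 3) & 7 == 1, which is
 * less than size - 1 == 3: the access crosses into the next granule
 * and both shadow bytes have to be checked.
 */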
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
                        unsigned long size)
{
    u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

    /*
     * The access crosses an 8-byte (one shadow byte) boundary and thus
     * maps into 2 shadow bytes, so we need to check them both.
     */
    if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
        return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

    return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
    u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

    /* An unaligned 16-byte access maps into 3 shadow bytes. */
    if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
        return *shadow_addr || memory_is_poisoned_1(addr + 15);

    return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
                    size_t size)
{
    while (size) {
        if (unlikely(*start))
            return (unsigned long)start;
        start++;
        size--;
    }

    return 0;
}

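/*
 * Scan a shadow range for a nonzero byte: short ranges (<= 16 bytes)
 * are checked byte-by-byte, longer ones as an unaligned byte prefix,
 * then whole u64 words, then the tail. Returns the address of the
 * first nonzero shadow byte, or 0 if the range is clean.
 */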
static __always_inline unsigned long memory_is_nonzero(const void *start,
                        const void *end)
{
    unsigned int words;
    unsigned long ret;
    unsigned int prefix = (unsigned long)start % 8;

    if (end - start <= 16)
        return bytes_is_nonzero(start, end - start);

    if (prefix) {
        prefix = 8 - prefix;
        ret = bytes_is_nonzero(start, prefix);
        if (unlikely(ret))
            return ret;
        start += prefix;
    }

    words = (end - start) / 8;
    while (words) {
        if (unlikely(*(u64 *)start))
            return bytes_is_nonzero(start, 8);
        start += 8;
        words--;
    }

    return bytes_is_nonzero(start, (end - start) % 8);
}

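/*
 * An N-byte access is valid only if every shadow byte it covers is
 * zero, except that the last granule may be partially accessible. A
 * nonzero hit therefore only means a bug if it is not the last shadow
 * byte, or if the access reaches past the partial value stored there.
 */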
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                        size_t size)
{
    unsigned long ret;

    ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
            kasan_mem_to_shadow((void *)addr + size - 1) + 1);

    if (unlikely(ret)) {
        unsigned long last_byte = addr + size - 1;
        s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

        if (unlikely(ret != (unsigned long)last_shadow ||
            ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
            return true;
    }
    return false;
}

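/*
 * For the compile-time-constant sizes emitted by the instrumentation
 * (1, 2, 4, 8, 16) this specializes to one or two shadow byte loads;
 * BUILD_BUG() guards against any other constant. Variable sizes take
 * the generic scan.
 */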
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
    if (__builtin_constant_p(size)) {
        switch (size) {
        case 1:
            return memory_is_poisoned_1(addr);
        case 2:
        case 4:
        case 8:
            return memory_is_poisoned_2_4_8(addr, size);
        case 16:
            return memory_is_poisoned_16(addr);
        default:
            BUILD_BUG();
        }
    }

    return memory_is_poisoned_n(addr, size);
}

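/*
 * Returns true if the access is valid. Reports and returns false for a
 * wrapping range, an address below the shadow-mapped region, or a
 * poisoned range. Zero-size accesses are trivially valid, and nothing
 * is checked before the arch has set up the shadow.
 */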
static __always_inline bool check_region_inline(unsigned long addr,
                        size_t size, bool write,
                        unsigned long ret_ip)
{
    if (!kasan_arch_is_ready())
        return true;

    if (unlikely(size == 0))
        return true;

    if (unlikely(addr + size < addr))
        return !kasan_report(addr, size, write, ret_ip);

    if (unlikely((void *)addr <
        kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
        return !kasan_report(addr, size, write, ret_ip);
    }

    if (likely(!memory_is_poisoned(addr, size)))
        return true;

    return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(unsigned long addr, size_t size, bool write,
                    unsigned long ret_ip)
{
    return check_region_inline(addr, size, write, ret_ip);
}

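/*
 * A byte is considered accessible when its granule is not fully
 * poisoned, i.e. the shadow value is in [0, KASAN_GRANULE_SIZE); the
 * byte's offset within the granule is not compared against a partial
 * shadow value here.
 */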
bool kasan_byte_accessible(const void *addr)
{
    s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

    return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

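/*
 * Generic KASAN delays the actual freeing of slab objects via a
 * quarantine to catch use-after-free accesses. When a cache is shrunk
 * or destroyed, its quarantined objects must be released first.
 */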
void kasan_cache_shrink(struct kmem_cache *cache)
{
    kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
    if (!__kmem_cache_empty(cache))
        kasan_quarantine_remove_cache(cache);
}

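/*
 * Example: for a 30-byte global with, say, size_with_redzone == 64,
 * bytes 0..29 are unpoisoned (the last object granule gets partial
 * shadow value 30 % 8 == 6, so bytes 30 and 31 stay inaccessible),
 * aligned_size is 32, and bytes 32..63 are poisoned as
 * KASAN_GLOBAL_REDZONE.
 */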
static void register_global(struct kasan_global *global)
{
    size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

    kasan_unpoison(global->beg, global->size, false);

    kasan_poison(global->beg + aligned_size,
             global->size_with_redzone - aligned_size,
             KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
    int i;

    for (i = 0; i < size; i++)
        register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

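/*
 * The compiler wraps every instrumented memory access in a call to one
 * of these hooks. Roughly (a sketch of the generated code):
 *
 *	__asan_store4((unsigned long)&p->field);
 *	p->field = x;
 *
 * The kernel is built with the no-abort flavor of the instrumentation,
 * so the __asan_*_noabort symbols are exported as aliases of the same
 * functions.
 */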
#define DEFINE_ASAN_LOAD_STORE(size)                    \
    void __asan_load##size(unsigned long addr)          \
    {                               \
        check_region_inline(addr, size, false, _RET_IP_);   \
    }                               \
    EXPORT_SYMBOL(__asan_load##size);               \
    __alias(__asan_load##size)                  \
    void __asan_load##size##_noabort(unsigned long);        \
    EXPORT_SYMBOL(__asan_load##size##_noabort);         \
    void __asan_store##size(unsigned long addr)         \
    {                               \
        check_region_inline(addr, size, true, _RET_IP_);    \
    }                               \
    EXPORT_SYMBOL(__asan_store##size);              \
    __alias(__asan_store##size)                 \
    void __asan_store##size##_noabort(unsigned long);       \
    EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
    kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
    kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
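/*
 * Resulting layout, sketched from the calls below:
 *
 *   [addr - KASAN_ALLOCA_REDZONE_SIZE, addr)  poisoned KASAN_ALLOCA_LEFT
 *   [addr, addr + size)                       accessible; the partial tail
 *                                             granule gets shadow size % 8
 *   [addr + rounded_up_size, addr + rounded_up_size + padding_size +
 *    KASAN_ALLOCA_REDZONE_SIZE)               poisoned KASAN_ALLOCA_RIGHT
 */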
void __asan_alloca_poison(unsigned long addr, size_t size)
{
    size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
    size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
            rounded_up_size;
    size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

    const void *left_redzone = (const void *)(addr -
            KASAN_ALLOCA_REDZONE_SIZE);
    const void *right_redzone = (const void *)(addr + rounded_up_size);

    WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

    kasan_unpoison((const void *)(addr + rounded_down_size),
            size - rounded_down_size, false);
    kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
             KASAN_ALLOCA_LEFT, false);
    kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
             KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
    if (unlikely(!stack_top || stack_top > stack_bottom))
        return;

    kasan_unpoison(stack_top, stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
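/*
 * The byte values follow the ASan shadow convention: 00 re-unpoisons,
 * f1/f2/f3 are the left/middle/right redzones of a stack frame, f8
 * (use-after-scope) poisons locals whose scope has ended, and f5 is
 * the after-return poison value in that convention.
 */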
#define DEFINE_ASAN_SET_SHADOW(byte) \
    void __asan_set_shadow_##byte(const void *addr, size_t size)    \
    {                               \
        __memset((void *)addr, 0x##byte, size);         \
    }                               \
    EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

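/*
 * Record an auxiliary stack trace (e.g. the call_rcu() or queue_work()
 * caller) in the object's alloc metadata, keeping the two most recent
 * ones. can_alloc says whether the stack depot may allocate memory,
 * which is not safe in all calling contexts.
 */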
static void __kasan_record_aux_stack(void *addr, bool can_alloc)
{
    struct slab *slab = kasan_addr_to_slab(addr);
    struct kmem_cache *cache;
    struct kasan_alloc_meta *alloc_meta;
    void *object;

    if (is_kfence_address(addr) || !slab)
        return;

    cache = slab->slab_cache;
    object = nearest_obj(cache, slab, addr);
    alloc_meta = kasan_get_alloc_meta(cache, object);
    if (!alloc_meta)
        return;

    alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
    alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT, can_alloc);
}

void kasan_record_aux_stack(void *addr)
{
    return __kasan_record_aux_stack(addr, true);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
    return __kasan_record_aux_stack(addr, false);
}

void kasan_set_free_info(struct kmem_cache *cache,
                void *object, u8 tag)
{
    struct kasan_free_meta *free_meta;

    free_meta = kasan_get_free_meta(cache, object);
    if (!free_meta)
        return;

    kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
    /* The object was freed and has free track set. */
    *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
                void *object, u8 tag)
{
    if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREETRACK)
        return NULL;
    /* Free meta must be present with KASAN_SLAB_FREETRACK. */
    return &kasan_get_free_meta(cache, object)->free_track;
}