![]() |
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RANDOMIZE_KSTACK_H
#define _LINUX_RANDOMIZE_KSTACK_H

#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/percpu-defs.h>

/*
 * Static key gating the feature at runtime; defaults on/off per
 * CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT. The per-CPU "kstack_offset"
 * holds the raw entropy consumed by add_random_kstack_offset() below.
 */
DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
			 randomize_kstack_offset);
DECLARE_PER_CPU(u32, kstack_offset);

/*
 * Do not use this anywhere else in the kernel. This is used here because
 * it provides an arch-agnostic way to grow the stack with correct
 * alignment. Also, since this use is being explicitly masked to a max of
 * 10 bits, stack-clash style attacks are unlikely. For more details see
 * "VLAs" in Documentation/process/deprecated.rst
 *
 * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
 * only with Clang and not GCC). Initializing the unused area on each syscall
 * entry is expensive, and generating an implicit call to memset() may also be
 * problematic (such as in noinstr functions). Therefore, if the compiler
 * supports it (which it should if it initializes allocas), always use the
 * "uninitialized" variant of the builtin.
 */
#if __has_builtin(__builtin_alloca_uninitialized)
#define __kstack_alloca __builtin_alloca_uninitialized
#else
#define __kstack_alloca __builtin_alloca
#endif

/*
 * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
 * "VLA" from being unbounded (see above). 10 bits leaves enough room for
 * per-arch offset masks to reduce entropy (by removing higher bits, since
 * high entropy may overly constrain usable stack space), and for
 * compiler/arch-specific stack alignment to remove the lower bits.
 */
#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)

/**
 * add_random_kstack_offset - Increase stack utilization by previously
 * chosen random offset
 *
 * This should be used in the syscall entry path when interrupts and
 * preempt are disabled, and after user registers have been stored to
 * the stack. For testing the resulting entropy, please see:
 * tools/testing/selftests/lkdtm/stack-entropy.sh
 */
#define add_random_kstack_offset() do {					\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = raw_cpu_read(kstack_offset);		\
		u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset));	\
		/* Keep allocation even after "ptr" loses scope. */	\
		asm volatile("" :: "r"(ptr) : "memory");		\
	}								\
} while (0)

/**
 * choose_random_kstack_offset - Choose the random offset for the next
 * add_random_kstack_offset()
 *
 * This should only be used during syscall exit when interrupts and
 * preempt are disabled. This position in the syscall flow is done to
 * frustrate attacks from userspace attempting to learn the next offset:
 * - Maximize the timing uncertainty visible from userspace: if the
 *   offset is chosen at syscall entry, userspace has much more control
 *   over the timing between choosing offsets. "How long will we be in
 *   kernel mode?" tends to be more difficult to predict than "how long
 *   will we be in user mode?"
 * - Reduce the lifetime of the new offset sitting in memory during
 *   kernel mode execution. Exposure of "thread-local" memory content
 *   (e.g. current, percpu, etc) tends to be easier than arbitrary
 *   location memory exposure.
 */
#define choose_random_kstack_offset(rand) do {				\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = raw_cpu_read(kstack_offset);		\
		/* XOR new entropy into the running per-CPU value. */	\
		offset ^= (rand);					\
		raw_cpu_write(kstack_offset, offset);			\
	}								\
} while (0)
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
/* With the feature compiled out, both hooks collapse to no-ops. */
#define add_random_kstack_offset() do { } while (0)
#define choose_random_kstack_offset(rand) do { } while (0)
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */

#endif
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.1.0 LXR engine. The LXR team |
![]() ![]() |