// SPDX-License-Identifier: GPL-2.0
/*
 * STACKLEAK: before returning to userspace, fill the used portion of the
 * kernel stack with a poison value so that leftover stack contents cannot
 * be leaked and uninitialized kernel stack variables are harder to abuse.
 *
 * Ported to mainline from the grsecurity/PaX STACKLEAK feature.
 */
#include <linux/stackleak.h>
#include <linux/kprobes.h>

#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
#include <linux/jump_label.h>
#include <linux/sysctl.h>
#include <linux/init.h>

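/*
 * Runtime bypass for stack erasing: when this static key is enabled (via
 * the "stack_erasing" sysctl below), the erase functions return early and
 * no poisoning is done.
 */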
static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);

#ifdef CONFIG_SYSCTL
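/*
 * Handler for the "stack_erasing" sysctl: report the current state
 * (1 = erasing enabled) and, on a write that changes it, flip the bypass
 * static key accordingly.
 */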
static int stack_erasing_sysctl(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = 0;
	int state = !static_branch_unlikely(&stack_erasing_bypass);
	int prev_state = state;

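	/*
	 * Point the table at the local 'state' so that proc_dointvec_minmax()
	 * reads and writes the value derived from the static key rather than
	 * a global variable.
	 */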
	table->data = &state;
	table->maxlen = sizeof(int);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	state = !!state;
	if (ret || !write || state == prev_state)
		return ret;

	if (state)
		static_branch_disable(&stack_erasing_bypass);
	else
		static_branch_enable(&stack_erasing_bypass);

	pr_warn("stackleak: kernel stack erasing is %s\n",
		state ? "enabled" : "disabled");
	return ret;
}
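
/*
 * /proc/sys/kernel/stack_erasing: accepts 0 or 1 and is root-writable only
 * (mode 0600); the handler above supplies the data pointer at runtime.
 */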
static struct ctl_table stackleak_sysctls[] = {
	{
		.procname	= "stack_erasing",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= stack_erasing_sysctl,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static int __init stackleak_sysctls_init(void)
{
	register_sysctl_init("kernel", stackleak_sysctls);
	return 0;
}
late_initcall(stackleak_sysctls_init);
#endif

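/* skip_erasing() reports whether the runtime bypass is currently enabled. */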
#define skip_erasing()	static_branch_unlikely(&stack_erasing_bypass)
#else
#define skip_erasing()	false
#endif

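/*
 * Erase the used portion of the task stack with STACKLEAK_POISON.
 * 'on_task_stack' says whether the caller is currently running on the task
 * stack itself or on a separate (e.g. entry trampoline) stack, which
 * determines how high up the task stack it is safe to write.
 */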
static __always_inline void __stackleak_erase(bool on_task_stack)
{
	const unsigned long task_stack_low = stackleak_task_low_bound(current);
	const unsigned long task_stack_high = stackleak_task_high_bound(current);
	unsigned long erase_low, erase_high;

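	/*
	 * Find the address just above the portion of the stack that is still
	 * poisoned from the previous erase; that is where erasing needs to
	 * start this time.
	 */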
	erase_low = stackleak_find_top_of_poison(task_stack_low,
						 current->lowest_stack);

#ifdef CONFIG_STACKLEAK_METRICS
	current->prev_lowest_stack = erase_low;
#endif

	/*
	 * Write poison to the task stack between 'erase_low' and 'erase_high'.
	 *
	 * If we are running on a different stack (e.g. an entry trampoline
	 * stack), we can erase everything below the pt_regs at the top of the
	 * task stack.
	 *
	 * If we are running on the task stack itself, we must not clobber any
	 * stack used by this function and its caller, so stop at the current
	 * stack pointer.
	 */
	if (on_task_stack)
		erase_high = current_stack_pointer;
	else
		erase_high = task_stack_high;

	while (erase_low < erase_high) {
		*(unsigned long *)erase_low = STACKLEAK_POISON;
		erase_low += sizeof(unsigned long);
	}

	/* Reset the 'lowest_stack' value for the next syscall. */
	current->lowest_stack = task_stack_high;
}

/*
 * Erase and poison the portion of the task stack used since the last erase,
 * detecting at runtime whether the caller is on the task stack or not.
 */
asmlinkage void noinstr stackleak_erase(void)
{
	if (skip_erasing())
		return;

	__stackleak_erase(on_thread_stack());
}

/*
 * Erase and poison the portion of the task stack used since the last erase.
 * The caller must be running on the task stack.
 */
asmlinkage void noinstr stackleak_erase_on_task_stack(void)
{
	if (skip_erasing())
		return;

	__stackleak_erase(true);
}

/*
 * Erase and poison the portion of the task stack used since the last erase.
 * The caller must not be running on the task stack.
 */
asmlinkage void noinstr stackleak_erase_off_task_stack(void)
{
	if (skip_erasing())
		return;

	__stackleak_erase(false);
}

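/*
 * Called from instrumentation inserted by the STACKLEAK compiler plugin at
 * the start of functions with a large enough stack frame: record the lowest
 * stack pointer seen so that stackleak_erase() knows how much of the stack
 * needs to be poisoned.
 */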
void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
{
	unsigned long sp = current_stack_pointer;

	/*
	 * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
	 * STACKLEAK_SEARCH_DEPTH makes the poison search in stackleak_erase()
	 * unreliable. Let's prevent that.
	 */
	BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);

	/* 'lowest_stack' should be aligned on the boundary of sizeof(unsigned long). */
	sp = ALIGN(sp, sizeof(unsigned long));
	if (sp < current->lowest_stack &&
	    sp >= stackleak_task_low_bound(current)) {
		current->lowest_stack = sp;
	}
}
EXPORT_SYMBOL(stackleak_track_stack);