// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE reporting.
 *
 * Copyright (C) 2020, Google LLC.
 */

#include <linux/stdarg.h>

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <trace/events/error_report.h>

#include <asm/kfence.h>

#include "kfence.h"

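/* May be overridden by <asm/kfence.h>. */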
#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif

extern bool no_hash_pointers;

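/* Helper function to either print to a seq_file or to console. */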
__printf(2, 3)
static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq)
		seq_vprintf(seq, fmt, args);
	else
		vprintk(fmt, args);
	va_end(args);
}

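/*
 * Get the number of stack entries to skip to get out of MM internals. @type is
 * optional, and if set to NULL, assumes an allocation or free stack.
 */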
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries,
			    const enum kfence_error_type *type)
{
	char buf[64];
	int skipnr, fallback = 0;

	if (type) {
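		/* Depending on error type, find different stack entries. */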
		switch (*type) {
		case KFENCE_ERROR_UAF:
		case KFENCE_ERROR_OOB:
		case KFENCE_ERROR_INVALID:
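			/*
			 * kfence_handle_page_fault() may be called with pt_regs
			 * set to NULL; in that case we'll simply show the full
			 * stack trace.
			 */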
			return 0;
		case KFENCE_ERROR_CORRUPTION:
		case KFENCE_ERROR_INVALID_FREE:
			break;
		}
	}

	for (skipnr = 0; skipnr < num_entries; skipnr++) {
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);

		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
		    !strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
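			/*
			 * In case of tail calls from any of the below
			 * to any of the above.
			 */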
			fallback = skipnr + 1;
		}

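		/* Also the *_bulk() variants by only checking prefixes. */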
		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
			goto found;
	}
	if (fallback < num_entries)
		return fallback;
found:
	skipnr++;
	return skipnr < num_entries ? skipnr : 0;
}

static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
			       bool show_alloc)
{
	const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
	u64 ts_sec = track->ts_nsec;
	unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);

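	/* Timestamp matches printk timestamp format. */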
	seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus:\n",
		       show_alloc ? "allocated" : "freed", track->pid,
		       track->cpu, (unsigned long)ts_sec, rem_nsec / 1000);

	if (track->num_stack_entries) {
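		/* Skip allocation/free internals stack. */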
		int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);

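		/* stack_trace_seq_print() does not exist; print directly. */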
		for (; i < track->num_stack_entries; i++)
			seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
	} else {
		seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
	}
}

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta)
{
	const int size = abs(meta->size);
	const unsigned long start = meta->addr;
	const struct kmem_cache *const cache = meta->cache;

	lockdep_assert_held(&meta->lock);

	if (meta->state == KFENCE_OBJECT_UNUSED) {
		seq_con_printf(seq, "kfence-#%td unused\n", meta - kfence_metadata);
		return;
	}

	seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
		       meta - kfence_metadata, (void *)start, (void *)(start + size - 1),
		       size, (cache && cache->name) ? cache->name : "<destroyed>");

	kfence_print_stack(seq, meta, true);

	if (meta->state == KFENCE_OBJECT_FREED) {
		seq_con_printf(seq, "\n");
		kfence_print_stack(seq, meta, false);
	}
}

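/*
 * Show bytes at @addr that are different from the expected canary values, up to
 * @max_bytes.
 */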
static void print_diff_canary(unsigned long address, size_t bytes_to_show,
			      const struct kfence_metadata *meta)
{
	const unsigned long show_until_addr = address + bytes_to_show;
	const u8 *cur, *end;

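	/* Do not show contents of object nor read into following guard page. */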
	end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
						: min(show_until_addr, PAGE_ALIGN(address)));

	pr_cont("[");
	for (cur = (const u8 *)address; cur < end; cur++) {
		if (*cur == KFENCE_CANARY_PATTERN(cur))
			pr_cont(" .");
		else if (no_hash_pointers)
			pr_cont(" 0x%02x", *cur);
		else /* Do not leak kernel memory in non-debug builds. */
			pr_cont(" !");
	}
	pr_cont(" ]");
}

static const char *get_access_type(bool is_write)
{
	return is_write ? "write" : "read";
}

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
	const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
	int num_stack_entries;
	int skipnr = 0;

	if (regs) {
		num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
	} else {
		num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
		skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
	}

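	/* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */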
	if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
		return;

	if (meta)
		lockdep_assert_held(&meta->lock);
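	/*
	 * Because we may generate reports in printk-unfriendly parts of the
	 * kernel, such as scheduler code, the use of printk() could deadlock.
	 * Until such time that all printing code here is safe in all parts of
	 * the kernel, accept the risk, and simply get our message out (given
	 * the system might already behave unpredictably due to the memory
	 * error). As such, also disable lockdep to hide warnings, and avoid
	 * disabling lockdep for the rest of the kernel.
	 */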
	lockdep_off();

	pr_err("==================================================================\n");
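	/* Print report header. */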
	switch (type) {
	case KFENCE_ERROR_OOB: {
		const bool left_of_object = address < meta->addr;

		pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Out-of-bounds %s at 0x%p (%luB %s of kfence-#%td):\n",
		       get_access_type(is_write), (void *)address,
		       left_of_object ? meta->addr - address : address - meta->addr,
		       left_of_object ? "left" : "right", object_index);
		break;
	}
	case KFENCE_ERROR_UAF:
		pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Use-after-free %s at 0x%p (in kfence-#%td):\n",
		       get_access_type(is_write), (void *)address, object_index);
		break;
	case KFENCE_ERROR_CORRUPTION:
		pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Corrupted memory at 0x%p ", (void *)address);
		print_diff_canary(address, 16, meta);
		pr_cont(" (in kfence-#%td):\n", object_index);
		break;
	case KFENCE_ERROR_INVALID:
		pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Invalid %s at 0x%p:\n", get_access_type(is_write),
		       (void *)address);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Invalid free of 0x%p (in kfence-#%td):\n", (void *)address,
		       object_index);
		break;
	}

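	/* Print stack trace and object info. */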
	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);

	if (meta) {
		pr_err("\n");
		kfence_print_object(NULL, meta);
	}

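	/* Print report footer. */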
	pr_err("\n");
	if (no_hash_pointers && regs)
		show_regs(regs);
	else
		dump_stack_print_info(KERN_ERR);
	trace_error_report_end(ERROR_DETECTOR_KFENCE, address);
	pr_err("==================================================================\n");

	lockdep_on();

	if (panic_on_warn)
		panic("panic_on_warn set ...\n");

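	/* We encountered a memory safety error, taint the kernel! */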
	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}

#ifdef CONFIG_PRINTK
static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
{
	int i, j;

	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
		kp_stack[j] = (void *)track->stack_entries[i];
	if (j < KS_ADDRS_COUNT)
		kp_stack[j] = NULL;
}

bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
	unsigned long flags;

	if (!meta)
		return false;

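	/*
	 * If state is UNUSED at least show the pointer requested; the rest
	 * would be garbage data.
	 */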
	kpp->kp_ptr = object;

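	/* Requesting info on a never-used object is almost certainly a bug. */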
	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
		return true;

	raw_spin_lock_irqsave(&meta->lock, flags);

	kpp->kp_slab = slab;
	kpp->kp_slab_cache = meta->cache;
	kpp->kp_objp = (void *)meta->addr;
	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
	kpp->kp_ret = kpp->kp_stack[0];

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return true;
}
#endif