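// SPDX-License-Identifier: GPL-2.0
/*
 * mm/kmemleak.c - kernel memory leak detector
 *
 * Every allocation is tracked in a struct kmemleak_object. Memory is
 * periodically scanned for values that look like pointers into the tracked
 * blocks; objects that accumulate fewer references than their min_count are
 * reported as suspected leaks via /sys/kernel/debug/kmemleak.
 *
 * Locking notes (from the least to the most nested):
 * - scan_mutex serializes memory scanning and the debugfs file accesses;
 * - kmemleak_lock (raw spinlock) protects object_list, the object search
 *   trees and the memory pool free list;
 * - kmemleak_object.lock (raw spinlock) protects a single object's fields.
 *   Two different object locks are never held at the same time except with
 *   SINGLE_DEPTH_NESTING during scanning, under scan_mutex.
 *
 * The kmemleak_object structures are reference counted (use_count) and
 * released via RCU once the last reference is dropped.
 */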
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOLOCKDEP)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

/*
 * Special values for struct kmemleak_object::min_count: a GREY object is
 * always scanned but never reported as a leak, a BLACK object is neither
 * scanned nor reported.
 */
#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are protected by the corresponding locks or mutex (see the
 * locking notes in the header comment). These objects are
 * reference-counted (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* surplus references found for this object are forwarded here */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for objects allocated with a physical address */
#define OBJECT_PHYS		(1 << 4)

/* string printed in front of each hex dump line */
#define HEX_PREFIX		" "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see the color comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for objects with the OBJECT_PHYS flag */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* protecting the access to object_list and the object search trees */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

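/*
 * Print the object's hex dump to the seq file or, if seq is NULL, to the
 * kernel log. The number of lines printed is limited to HEX_MAX_LINES to
 * prevent seq file spamming. The caller must hold object->lock.
 */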
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

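/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as a false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function is defined for this color.
 */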
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been deleted and have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Print the unreferenced object information to the seq file. This function
 * must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, " backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases in the kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice(" min_count = %d\n", object->min_count);
	pr_notice(" count = %d\n", object->count);
	pr_notice(" flags = 0x%x\n", object->flags);
	pr_notice(" checksum = %u\n", object->checksum);
	pr_notice(" backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}

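/*
 * Look up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. is_phys selects the tree
 * indexed by physical address. The kmemleak_lock must be held when calling
 * this function.
 */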
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       bool is_phys)
{
	struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
			     object_tree_root.rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, false);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise.
 * Note that once an object's use_count reached 0, the RCU freeing was
 * already registered and the object should no longer be used. This function
 * must be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

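/*
 * Memory pool allocation: prefer the slab cache but fall back to the static
 * mem_pool[] array when the cache is not yet available (early boot) or the
 * allocation fails. Returns NULL once both sources are exhausted.
 */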
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

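/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU grace period. Since delete_object() just decrements the use_count,
 * no object is freed while it is still being referenced (e.g. during memory
 * scanning or by the kmemleak_seq* file operations).
 */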
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object() was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks; however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, is_phys);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, false);
}

/*
 * Remove an object from its search tree (object_tree_root or
 * object_phys_tree_root) and from object_list. Must be called with the
 * kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
				   &object_phys_tree_root :
				   &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both the
 * search tree and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, is_phys);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save a stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}

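/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and the object search tree
 * (object_tree_root or, for physical addresses, object_phys_tree_root).
 */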
static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
					       int min_count, gfp_t gfp,
					       bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr for objects storing a virtual
	 * address.
	 */
	if (!is_phys) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = is_phys ? &object_phys_tree_root.rb_node :
		&object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
					  &object_tree_root);
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	return __create_object(ptr, size, min_count, gfp, false);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
						  int min_count, gfp_t gfp)
{
	return __create_object(ptr, size, min_count, gfp, true);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via
 * put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed while it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, false);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

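/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the remaining parts are
 * re-registered as one or two smaller objects.
 */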
static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1, is_phys);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		__create_object(start, ptr - start, object->min_count,
				GFP_KERNEL, is_phys);
	if (ptr + size < end)
		__create_object(ptr + size, end - ptr - size, object->min_count,
				GFP_KERNEL, is_phys);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, bool is_phys)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, is_phys);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, false);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, bool is_phys)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
}

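/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */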
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false negatives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed
 * object (see kmemleak_vmalloc()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

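/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */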
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, false);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no
 * longer be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done
 * when it is known that the corresponding block is not a leak and does not
 * contain any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, false);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an
 * object contain references to other objects. Kmemleak will only scan these
 * areas, reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create the object with the OBJECT_PHYS flag and a
		 * min_count of 0 (scanned but never reported).
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%pa)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, true);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%pa)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, true);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

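/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list. Every aligned word in the block is treated as a
 * potential pointer; values outside [min_addr, max_addr) are rejected early
 * and the rest are looked up in the object search tree.
 */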
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

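/*
 * Scan a memory block corresponding to a kmemleak_object. Either the whole
 * block or only the registered scan areas are scanned. Large blocks are
 * walked in MAX_SCAN_SIZE chunks, dropping object->lock in between so that
 * rescheduling can take place.
 */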
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;
	void *obj_ptr;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	obj_ptr = object->flags & OBJECT_PHYS ?
		  __va((phys_addr_t)object->pointer) :
		  (void *)object->pointer;

	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = obj_ptr;
		void *end = obj_ptr + object->size;
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is not done under lock since the elements from
	 * the gray_list are not freed before the corresponding use_count
	 * drops to 0. New gray objects appended to the tail of the list
	 * during the traversal are picked up by this loop as well.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

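/*
 * Scan the data sections, the struct page array, the per-cpu sections, the
 * task stacks and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */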
static void kmemleak_scan(void)
{
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;
	int loop1_cnt = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		bool obj_pinned = false;

		loop1_cnt++;
		raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif

		/* ignore objects outside lowmem (paint them black) */
		if ((object->flags & OBJECT_PHYS) &&
		    !(object->flags & OBJECT_NO_SCAN)) {
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) >= max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			obj_pinned = true;
		}

		raw_spin_unlock_irq(&object->lock);

		/*
		 * Do a cond_resched() every 64k objects to avoid soft lockup.
		 * The object must be pinned (or re-pinned here) across the
		 * RCU read-side critical section break.
		 */
		if (!(loop1_cnt & 0xffff)) {
			if (!obj_pinned && !get_object(object)) {
				/* try the next object instead */
				loop1_cnt--;
				continue;
			}

			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();

			if (!obj_pinned)
				put_object(object);
		}
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if the page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED) &&
		    update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped, do not report any new unreferenced
	 * objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

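/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported, but only once.
 */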
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start	= kmemleak_seq_start,
	.next	= kmemleak_seq_next,
	.stop	= kmemleak_seq_stop,
	.show	= kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects and we'd end up
 * with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

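/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to stop the automatic scanning)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 */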
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		/* convert secs to msecs, clamping against overflow */
		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or holding the kmemleak_lock.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no leaks were found (otherwise keep the metadata so that the existing leak
 * reports remain accessible until explicitly cleared).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak enabling/disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

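/*
 * Kmemleak initialization: set up the internal caches and register the
 * kernel's data, bss and ro_after_init sections as gray objects so that
 * they are scanned but never reported.
 */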
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);