Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 #include <linux/export.h>
0003 #include <linux/ref_tracker.h>
0004 #include <linux/slab.h>
0005 #include <linux/stacktrace.h>
0006 #include <linux/stackdepot.h>
0007 
0008 #define REF_TRACKER_STACK_ENTRIES 16
0009 
/*
 * One tracked reference. Allocated by ref_tracker_alloc(), moved to the
 * directory's quarantine list by ref_tracker_free(), and finally kfree'd
 * either when rotated out of quarantine or at ref_tracker_dir_exit().
 */
struct ref_tracker {
	struct list_head	head;   /* anchor into dir->list or dir->quarantine */
	bool			dead;   /* set once ref_tracker_free() released this ref */
	depot_stack_handle_t	alloc_stack_handle; /* stack saved at allocation; 0 if depot save failed */
	depot_stack_handle_t	free_stack_handle;  /* stack saved at release; 0 until freed or if save failed */
};
0016 
/*
 * ref_tracker_dir_exit - tear down a tracker directory and report leaks
 * @dir: tracker directory being destroyed
 *
 * Under the directory lock, first reclaims every tracker parked on the
 * quarantine list (those references were already released), then walks the
 * active list: any entry still there is a leaked reference, so its saved
 * allocation stack (if any) is printed before the tracker is freed.
 *
 * Warns once if any leak was found, and once if the untracked/no_tracker
 * refcounts did not return to their initial value of 1, i.e. "anonymous"
 * references (allocated without a tracker) were not all released.
 *
 * NOTE(review): dir->dead is set outside the lock; concurrent alloc/free
 * after this point only trip WARN_ON_ONCE in those paths — the caller is
 * expected to guarantee quiescence before calling this.
 */
void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
	struct ref_tracker *tracker, *n;
	unsigned long flags;
	bool leak = false;

	dir->dead = true;
	spin_lock_irqsave(&dir->lock, flags);
	/* Quarantined trackers were already released; just reclaim memory. */
	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
		list_del(&tracker->head);
		kfree(tracker);
		dir->quarantine_avail++;
	}
	/* Anything still on the active list is a leaked reference. */
	list_for_each_entry_safe(tracker, n, &dir->list, head) {
		pr_err("leaked reference.\n");
		if (tracker->alloc_stack_handle)
			stack_depot_print(tracker->alloc_stack_handle);
		leak = true;
		list_del(&tracker->head);
		kfree(tracker);
	}
	spin_unlock_irqrestore(&dir->lock, flags);
	WARN_ON_ONCE(leak);
	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
	WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);
0044 
0045 void ref_tracker_dir_print(struct ref_tracker_dir *dir,
0046                unsigned int display_limit)
0047 {
0048     struct ref_tracker *tracker;
0049     unsigned long flags;
0050     unsigned int i = 0;
0051 
0052     spin_lock_irqsave(&dir->lock, flags);
0053     list_for_each_entry(tracker, &dir->list, head) {
0054         if (i < display_limit) {
0055             pr_err("leaked reference.\n");
0056             if (tracker->alloc_stack_handle)
0057                 stack_depot_print(tracker->alloc_stack_handle);
0058             i++;
0059         } else {
0060             break;
0061         }
0062     }
0063     spin_unlock_irqrestore(&dir->lock, flags);
0064 }
0065 EXPORT_SYMBOL(ref_tracker_dir_print);
0066 
/*
 * ref_tracker_alloc - register a new tracked reference
 * @dir: tracker directory the reference belongs to
 * @trackerp: output; receives the new tracker, or NULL on allocation failure.
 *            May itself be NULL to take an intentionally untracked reference.
 * @gfp: allocation flags from the caller's context
 *
 * Captures the caller's stack, stores it in the stack depot, and links a new
 * tracker onto the directory's active list.
 *
 * Return: 0 on success, -ENOMEM if the tracker could not be allocated (the
 * reference is still counted, via dir->untracked, but its stack is lost).
 */
int ref_tracker_alloc(struct ref_tracker_dir *dir,
		      struct ref_tracker **trackerp,
		      gfp_t gfp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	gfp_t gfp_mask = gfp;
	unsigned long flags;

	/* Allocating after ref_tracker_dir_exit() is a caller bug. */
	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		/* Caller opted out of per-reference tracking; just count it. */
		refcount_inc(&dir->no_tracker);
		return 0;
	}
	/* If the caller may sleep, insist on getting the tracker memory. */
	if (gfp & __GFP_DIRECT_RECLAIM)
		gfp_mask |= __GFP_NOFAIL;
	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
	if (unlikely(!tracker)) {
		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
		refcount_inc(&dir->untracked);
		return -ENOMEM;
	}
	/* skipnr == 1 drops this function's own frame from the trace. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	/*
	 * Deliberately passes the caller's original @gfp (without
	 * __GFP_NOFAIL): a failed depot save just leaves the handle 0.
	 */
	tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);

	spin_lock_irqsave(&dir->lock, flags);
	list_add(&tracker->head, &dir->list);
	spin_unlock_irqrestore(&dir->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);
0100 
/*
 * ref_tracker_free - release a tracked reference
 * @dir: tracker directory the reference belongs to
 * @trackerp: pointer to the tracker returned by ref_tracker_alloc().
 *            NULL means the reference was taken untracked on purpose;
 *            *trackerp == NULL means tracker allocation had failed.
 *
 * Records the caller's stack as the free site, marks the tracker dead and
 * moves it to the quarantine list so that a double free can still be
 * diagnosed for a while. When quarantine capacity is exhausted, the oldest
 * quarantined tracker is evicted and kfree'd instead of growing the list.
 *
 * Return: 0 on success, -EEXIST if the tracker was never allocated
 * (untracked reference), -EINVAL on a detected double free.
 */
int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	depot_stack_handle_t stack_handle;
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	unsigned long flags;

	/* Freeing after ref_tracker_dir_exit() is a caller bug. */
	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		/* Matches the refcount_inc() in the untracked alloc path. */
		refcount_dec(&dir->no_tracker);
		return 0;
	}
	tracker = *trackerp;
	if (!tracker) {
		/* Tracker allocation had failed; only the count was kept. */
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	/* Capture the free site before taking the lock (depot may allocate). */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);

	spin_lock_irqsave(&dir->lock, flags);
	if (tracker->dead) {
		/* Double free: report both the alloc and the first free site. */
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	/* Park in quarantine; if full, evict the oldest entry instead. */
	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	/* kfree(NULL) is a no-op, so this only frees an evicted tracker. */
	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);