// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS    14
#define ODEBUG_HASH_SIZE    (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE    1024
#define ODEBUG_POOL_MIN_LEVEL   256
#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_BATCH_SIZE   16

#define ODEBUG_CHUNK_SHIFT  PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE   (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK   (~(ODEBUG_CHUNK_SIZE - 1))

/*
 * Freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation, so at
 * most roughly 10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX    1024
#define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
    struct hlist_head   list;
    raw_spinlock_t      lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
    struct hlist_head   free_objs;
    int         obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket  obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj     obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int          obj_pool_min_free = ODEBUG_POOL_SIZE;
static int          obj_pool_free = ODEBUG_POOL_SIZE;
static int          obj_pool_used;
static int          obj_pool_max_used;
static bool         obj_freeing;
/* The number of objs on the global free list */
static int          obj_nr_tofree;

static int          debug_objects_maxchain __read_mostly;
static int __maybe_unused   debug_objects_maxchecked __read_mostly;
static int          debug_objects_fixups __read_mostly;
static int          debug_objects_warnings __read_mostly;
static int          debug_objects_enabled __read_mostly
                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int          debug_objects_pool_size __read_mostly
                = ODEBUG_POOL_SIZE;
static int          debug_objects_pool_min_level __read_mostly
                = ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache    *obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int          debug_objects_allocated;
static int          debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
    debug_objects_enabled = 1;
    return 0;
}

static int __init disable_object_debug(char *str)
{
    debug_objects_enabled = 0;
    return 0;
}

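/*
 * Boot-time switches: "debug_objects" / "no_debug_objects" on the kernel
 * command line override the CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT default.
 */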
early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
    [ODEBUG_STATE_NONE]     = "none",
    [ODEBUG_STATE_INIT]     = "initialized",
    [ODEBUG_STATE_INACTIVE]     = "inactive",
    [ODEBUG_STATE_ACTIVE]       = "active",
    [ODEBUG_STATE_DESTROYED]    = "destroyed",
    [ODEBUG_STATE_NOTAVAILABLE] = "not available",
};

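/*
 * Refill the global object pool: first recycle objects parked on the
 * global free (to-be-freed) list, then, once the slab cache is available,
 * allocate fresh objects in batches of ODEBUG_BATCH_SIZE until the pool
 * is back above debug_objects_pool_min_level.
 */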
static void fill_pool(void)
{
    gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
    struct debug_obj *obj;
    unsigned long flags;

    if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
        return;

    /*
     * Reuse objs from the global free list; they will be reinitialized
     * when allocating.
     *
     * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
     * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
     * sections.
     */
    while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
        raw_spin_lock_irqsave(&pool_lock, flags);
        /*
         * Recheck with the lock held as the worker thread might have
         * won the race and freed the global free list already.
         */
        while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
            obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
            hlist_del(&obj->node);
            WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
            hlist_add_head(&obj->node, &obj_pool);
            WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
    }

    if (unlikely(!obj_cache))
        return;

    while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
        struct debug_obj *new[ODEBUG_BATCH_SIZE];
        int cnt;

        for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
            new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
            if (!new[cnt])
                break;
        }
        if (!cnt)
            return;

        raw_spin_lock_irqsave(&pool_lock, flags);
        while (cnt) {
            hlist_add_head(&new[--cnt]->node, &obj_pool);
            debug_objects_allocated++;
            WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
    }
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
    struct debug_obj *obj;
    int cnt = 0;

    hlist_for_each_entry(obj, &b->list, node) {
        cnt++;
        if (obj->object == addr)
            return obj;
    }
    if (cnt > debug_objects_maxchain)
        debug_objects_maxchain = cnt;

    return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
    struct debug_obj *obj = NULL;

    if (list->first) {
        obj = hlist_entry(list->first, typeof(*obj), node);
        hlist_del(&obj->node);
    }

    return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
    struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
    struct debug_obj *obj;

    if (likely(obj_cache)) {
        obj = __alloc_object(&percpu_pool->free_objs);
        if (obj) {
            percpu_pool->obj_free--;
            goto init_obj;
        }
    }

    raw_spin_lock(&pool_lock);
    obj = __alloc_object(&obj_pool);
    if (obj) {
        obj_pool_used++;
        WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

        /*
         * Looking ahead, allocate one batch of debug objects and
         * put them into the percpu free pool.
         */
        if (likely(obj_cache)) {
            int i;

            for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                struct debug_obj *obj2;

                obj2 = __alloc_object(&obj_pool);
                if (!obj2)
                    break;
                hlist_add_head(&obj2->node,
                           &percpu_pool->free_objs);
                percpu_pool->obj_free++;
                obj_pool_used++;
                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
            }
        }

        if (obj_pool_used > obj_pool_max_used)
            obj_pool_max_used = obj_pool_used;

        if (obj_pool_free < obj_pool_min_free)
            obj_pool_min_free = obj_pool_free;
    }
    raw_spin_unlock(&pool_lock);

init_obj:
    if (obj) {
        obj->object = addr;
        obj->descr  = descr;
        obj->state  = ODEBUG_STATE_NONE;
        obj->astate = 0;
        hlist_add_head(&obj->node, &b->list);
    }
    return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
    struct hlist_node *tmp;
    struct debug_obj *obj;
    unsigned long flags;
    HLIST_HEAD(tofree);

    WRITE_ONCE(obj_freeing, false);
    if (!raw_spin_trylock_irqsave(&pool_lock, flags))
        return;

    if (obj_pool_free >= debug_objects_pool_size)
        goto free_objs;

    /*
     * The objs on the pool list might be allocated before the work is
     * run, so recheck whether the pool list is full. If not, refill the
     * pool list from the global free list. As it is likely that a
     * workload may be gearing up to use more and more objects, don't
     * free any of them until the next round.
     */
    while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
        hlist_del(&obj->node);
        hlist_add_head(&obj->node, &obj_pool);
        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
    }
    raw_spin_unlock_irqrestore(&pool_lock, flags);
    return;

free_objs:
    /*
     * Pool list is already full and there are still objs on the free
     * list. Move remaining free objs to a temporary list to free the
     * memory outside the pool_lock held region.
     */
    if (obj_nr_tofree) {
        hlist_move_list(&obj_to_free, &tofree);
        debug_objects_freed += obj_nr_tofree;
        WRITE_ONCE(obj_nr_tofree, 0);
    }
    raw_spin_unlock_irqrestore(&pool_lock, flags);

    hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
        hlist_del(&obj->node);
        kmem_cache_free(obj_cache, obj);
    }
}

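/*
 * Free an object into the percpu pool if there is room. If the percpu
 * pool is full, pull a lookahead batch out of it and push everything
 * back to the global pool, queueing objects for actual kmem_cache_free()
 * once the global pool is above debug_objects_pool_size.
 */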
static void __free_object(struct debug_obj *obj)
{
    struct debug_obj *objs[ODEBUG_BATCH_SIZE];
    struct debug_percpu_free *percpu_pool;
    int lookahead_count = 0;
    unsigned long flags;
    bool work;

    local_irq_save(flags);
    if (!obj_cache)
        goto free_to_obj_pool;

    /*
     * Try to free it into the percpu pool first.
     */
    percpu_pool = this_cpu_ptr(&percpu_obj_pool);
    if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
        hlist_add_head(&obj->node, &percpu_pool->free_objs);
        percpu_pool->obj_free++;
        local_irq_restore(flags);
        return;
    }

    /*
     * As the percpu pool is full, look ahead and pull out a batch
     * of objects from the percpu pool and free them as well.
     */
    for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
        objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
        if (!objs[lookahead_count])
            break;
        percpu_pool->obj_free--;
    }

free_to_obj_pool:
    raw_spin_lock(&pool_lock);
    work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
           (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
    obj_pool_used--;

    if (work) {
        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
        hlist_add_head(&obj->node, &obj_to_free);
        if (lookahead_count) {
            WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
            obj_pool_used -= lookahead_count;
            while (lookahead_count) {
                hlist_add_head(&objs[--lookahead_count]->node,
                           &obj_to_free);
            }
        }

        if ((obj_pool_free > debug_objects_pool_size) &&
            (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
            int i;

            /*
             * Free one more batch of objects from obj_pool.
             */
            for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                obj = __alloc_object(&obj_pool);
                hlist_add_head(&obj->node, &obj_to_free);
                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
            }
        }
    } else {
        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
        hlist_add_head(&obj->node, &obj_pool);
        if (lookahead_count) {
            WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
            obj_pool_used -= lookahead_count;
            while (lookahead_count) {
                hlist_add_head(&objs[--lookahead_count]->node,
                           &obj_pool);
            }
        }
    }
    raw_spin_unlock(&pool_lock);
    local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
    __free_object(obj);
    if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
        WRITE_ONCE(obj_freeing, true);
        schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
    }
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
    struct debug_percpu_free *percpu_pool;
    struct hlist_node *tmp;
    struct debug_obj *obj;

    /* Remote access is safe as the CPU is dead already */
    percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
    hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
        hlist_del(&obj->node);
        kmem_cache_free(obj_cache, obj);
    }
    percpu_pool->obj_free = 0;

    return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
    struct debug_bucket *db = obj_hash;
    struct hlist_node *tmp;
    HLIST_HEAD(freelist);
    struct debug_obj *obj;
    unsigned long flags;
    int i;

    pr_warn("Out of memory. ODEBUG disabled\n");

    for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
        raw_spin_lock_irqsave(&db->lock, flags);
        hlist_move_list(&db->list, &freelist);
        raw_spin_unlock_irqrestore(&db->lock, flags);

        /* Now free them */
        hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
            hlist_del(&obj->node);
            free_object(obj);
        }
    }
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket. With
 * ODEBUG_CHUNK_SHIFT equal to PAGE_SHIFT, all addresses within the
 * same page hash to the same bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
    unsigned long hash;

    hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
    return &obj_hash[hash];
}

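/*
 * Report a state violation. The warning is rate limited to five reports
 * per boot (selftest objects excluded); debug_objects_warnings counts
 * every violation regardless.
 */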
static void debug_print_object(struct debug_obj *obj, char *msg)
{
    const struct debug_obj_descr *descr = obj->descr;
    static int limit;

    if (limit < 5 && descr != descr_test) {
        void *hint = descr->debug_hint ?
            descr->debug_hint(obj->object) : NULL;
        limit++;
        WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                 "object type: %s hint: %pS\n",
            msg, obj_states[obj->state], obj->astate,
            descr->name, hint);
    }
    debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
           void *addr, enum debug_obj_state state)
{
    if (fixup && fixup(addr, state)) {
        debug_objects_fixups++;
        return true;
    }
    return false;
}

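/*
 * Sanity check: warn (at most five times per boot) when an object that
 * lives on the current task's stack was not initialized via
 * debug_object_init_on_stack(), or vice versa.
 */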
static void debug_object_is_on_stack(void *addr, int onstack)
{
    int is_on_stack;
    static int limit;

    if (limit > 4)
        return;

    is_on_stack = object_is_on_stack(addr);
    if (is_on_stack == onstack)
        return;

    limit++;
    if (is_on_stack)
        pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
             task_stack_page(current));
    else
        pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
             task_stack_page(current));

    WARN_ON(1);
}

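/*
 * Core of debug_object_init(): refill the pool if possible, look the
 * object up in its hash bucket (allocating a tracker entry on first
 * sight) and transition it to ODEBUG_STATE_INIT, reporting init on an
 * active or destroyed object as a violation.
 */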
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
    enum debug_obj_state state;
    bool check_stack = false;
    struct debug_bucket *db;
    struct debug_obj *obj;
    unsigned long flags;

    /*
     * On RT enabled kernels the pool refill must happen in preemptible
     * context:
     */
    if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
        fill_pool();

    db = get_bucket((unsigned long) addr);

    raw_spin_lock_irqsave(&db->lock, flags);

    obj = lookup_object(addr, db);
    if (!obj) {
        obj = alloc_object(addr, db, descr);
        if (!obj) {
            debug_objects_enabled = 0;
            raw_spin_unlock_irqrestore(&db->lock, flags);
            debug_objects_oom();
            return;
        }
        check_stack = true;
    }

    switch (obj->state) {
    case ODEBUG_STATE_NONE:
    case ODEBUG_STATE_INIT:
    case ODEBUG_STATE_INACTIVE:
        obj->state = ODEBUG_STATE_INIT;
        break;

    case ODEBUG_STATE_ACTIVE:
        state = obj->state;
        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(obj, "init");
        debug_object_fixup(descr->fixup_init, addr, state);
        return;

    case ODEBUG_STATE_DESTROYED:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(obj, "init");
        return;
    default:
        break;
    }

    raw_spin_unlock_irqrestore(&db->lock, flags);
    if (check_stack)
        debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:   address of the object
 * @descr:  pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
    if (!debug_objects_enabled)
        return;

    __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *              initialized
 * @addr:   address of the object
 * @descr:  pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
    if (!debug_objects_enabled)
        return;

    __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

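/*
 * Usage sketch (illustrative only; "struct my_obj" and my_debug_descr
 * are hypothetical, not part of this file): a subsystem declares a
 * descriptor and calls the debug_object_* hooks from its own lifetime
 * functions.
 *
 *	static const struct debug_obj_descr my_debug_descr = {
 *		.name = "my_obj",
 *	};
 *
 *	void my_obj_init(struct my_obj *obj)
 *	{
 *		debug_object_init(obj, &my_debug_descr);
 *	}
 *
 *	void my_obj_start(struct my_obj *obj)
 *	{
 *		if (debug_object_activate(obj, &my_debug_descr))
 *			return;	 (state violation was reported)
 *	}
 */
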
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:   address of the object
 * @descr:  pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
    enum debug_obj_state state;
    struct debug_bucket *db;
    struct debug_obj *obj;
    unsigned long flags;
    int ret;
    struct debug_obj o = { .object = addr,
                   .state = ODEBUG_STATE_NOTAVAILABLE,
                   .descr = descr };

    if (!debug_objects_enabled)
        return 0;

    db = get_bucket((unsigned long) addr);

    raw_spin_lock_irqsave(&db->lock, flags);

    obj = lookup_object(addr, db);
    if (obj) {
        bool print_object = false;

        switch (obj->state) {
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
            obj->state = ODEBUG_STATE_ACTIVE;
            ret = 0;
            break;

        case ODEBUG_STATE_ACTIVE:
            state = obj->state;
            raw_spin_unlock_irqrestore(&db->lock, flags);
            debug_print_object(obj, "activate");
            ret = debug_object_fixup(descr->fixup_activate, addr, state);
            return ret ? 0 : -EINVAL;

        case ODEBUG_STATE_DESTROYED:
            print_object = true;
            ret = -EINVAL;
            break;
        default:
            ret = 0;
            break;
        }
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (print_object)
            debug_print_object(obj, "activate");
        return ret;
    }

    raw_spin_unlock_irqrestore(&db->lock, flags);

    /*
     * We are here when a static object is activated. We let the type
     * specific code confirm whether this is true or not. If true, we
     * just make sure that the static object is tracked in the object
     * tracker. If not, this must be a bug, so we try to fix it up.
     */
    if (descr->is_static_object && descr->is_static_object(addr)) {
        /* track this static object */
        debug_object_init(addr, descr);
        debug_object_activate(addr, descr);
    } else {
        debug_print_object(&o, "activate");
        ret = debug_object_fixup(descr->fixup_activate, addr,
                    ODEBUG_STATE_NOTAVAILABLE);
        return ret ? 0 : -EINVAL;
    }
    return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:   address of the object
 * @descr:  pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
    struct debug_bucket *db;
    struct debug_obj *obj;
    unsigned long flags;
    bool print_object = false;

    if (!debug_objects_enabled)
        return;

    db = get_bucket((unsigned long) addr);

    raw_spin_lock_irqsave(&db->lock, flags);

    obj = lookup_object(addr, db);
    if (obj) {
        switch (obj->state) {
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
        case ODEBUG_STATE_ACTIVE:
            if (!obj->astate)
                obj->state = ODEBUG_STATE_INACTIVE;
            else
                print_object = true;
            break;

        case ODEBUG_STATE_DESTROYED:
            print_object = true;
            break;
        default:
            break;
        }
    }

    raw_spin_unlock_irqrestore(&db->lock, flags);
    if (!obj) {
        struct debug_obj o = { .object = addr,
                       .state = ODEBUG_STATE_NOTAVAILABLE,
                       .descr = descr };

        debug_print_object(&o, "deactivate");
    } else if (print_object) {
        debug_print_object(obj, "deactivate");
    }
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:   address of the object
 * @descr:  pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
    enum debug_obj_state state;
    struct debug_bucket *db;
    struct debug_obj *obj;
    unsigned long flags;
    bool print_object = false;

    if (!debug_objects_enabled)
        return;

    db = get_bucket((unsigned long) addr);

    raw_spin_lock_irqsave(&db->lock, flags);

    obj = lookup_object(addr, db);
    if (!obj)
        goto out_unlock;

    switch (obj->state) {
    case ODEBUG_STATE_NONE:
    case ODEBUG_STATE_INIT:
    case ODEBUG_STATE_INACTIVE:
        obj->state = ODEBUG_STATE_DESTROYED;
        break;
    case ODEBUG_STATE_ACTIVE:
        state = obj->state;
        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(obj, "destroy");
        debug_object_fixup(descr->fixup_destroy, addr, state);
        return;

    case ODEBUG_STATE_DESTROYED:
        print_object = true;
        break;
    default:
        break;
    }
out_unlock:
    raw_spin_unlock_irqrestore(&db->lock, flags);
    if (print_object)
        debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:   address of the object
 * @descr:  pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
    enum debug_obj_state state;
    struct debug_bucket *db;
    struct debug_obj *obj;
    unsigned long flags;

    if (!debug_objects_enabled)
        return;

    db = get_bucket((unsigned long) addr);

    raw_spin_lock_irqsave(&db->lock, flags);

    obj = lookup_object(addr, db);
    if (!obj)
        goto out_unlock;

    switch (obj->state) {
    case ODEBUG_STATE_ACTIVE:
        state = obj->state;
        raw_spin_unlock_irqrestore(&db->lock, flags);
        debug_print_object(obj, "free");
        debug_object_fixup(descr->fixup_free, addr, state);
        return;
    default:
        hlist_del(&obj->node);
        raw_spin_unlock_irqrestore(&db->lock, flags);
        free_object(obj);
        return;
    }
out_unlock:
    raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:   address of the object
 * @descr:  pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
    struct debug_bucket *db;
    struct debug_obj *obj;
    unsigned long flags;

    if (!debug_objects_enabled)
        return;

    db = get_bucket((unsigned long) addr);

    raw_spin_lock_irqsave(&db->lock, flags);

    obj = lookup_object(addr, db);
    if (!obj) {
        struct debug_obj o = { .object = addr,
                       .state = ODEBUG_STATE_NOTAVAILABLE,
                       .descr = descr };

        raw_spin_unlock_irqrestore(&db->lock, flags);
        /*
         * Maybe the object is static, and we let the type specific
         * code confirm. Track this static object if true, else invoke
         * fixup.
         */
        if (descr->is_static_object && descr->is_static_object(addr)) {
            /* Track this static object */
            debug_object_init(addr, descr);
        } else {
            debug_print_object(&o, "assert_init");
            debug_object_fixup(descr->fixup_assert_init, addr,
                       ODEBUG_STATE_NOTAVAILABLE);
        }
        return;
    }

    raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:   address of the object
 * @descr:  pointer to an object specific debug description structure
 * @expect: expected state
 * @next:   state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
              unsigned int expect, unsigned int next)
{
    struct debug_bucket *db;
    struct debug_obj *obj;
    unsigned long flags;
    bool print_object = false;

    if (!debug_objects_enabled)
        return;

    db = get_bucket((unsigned long) addr);

    raw_spin_lock_irqsave(&db->lock, flags);

    obj = lookup_object(addr, db);
    if (obj) {
        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
            if (obj->astate == expect)
                obj->astate = next;
            else
                print_object = true;
            break;

        default:
            print_object = true;
            break;
        }
    }

    raw_spin_unlock_irqrestore(&db->lock, flags);
    if (!obj) {
        struct debug_obj o = { .object = addr,
                       .state = ODEBUG_STATE_NOTAVAILABLE,
                       .descr = descr };

        debug_print_object(&o, "active_state");
    } else if (print_object) {
        debug_print_object(obj, "active_state");
    }
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
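/*
 * Check a memory range that is about to be freed for tracked objects
 * and treat any still-active object as a "free" state violation.
 */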
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
    unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
    const struct debug_obj_descr *descr;
    enum debug_obj_state state;
    struct debug_bucket *db;
    struct hlist_node *tmp;
    struct debug_obj *obj;
    int cnt, objs_checked = 0;

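    /*
     * Walk every hash bucket that can contain objects inside
     * [address, address + size), one ODEBUG_CHUNK_SIZE chunk at a time.
     */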
    saddr = (unsigned long) address;
    eaddr = saddr + size;
    paddr = saddr & ODEBUG_CHUNK_MASK;
    chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
    chunks >>= ODEBUG_CHUNK_SHIFT;

    for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
        db = get_bucket(paddr);

repeat:
        cnt = 0;
        raw_spin_lock_irqsave(&db->lock, flags);
        hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
            cnt++;
            oaddr = (unsigned long) obj->object;
            if (oaddr < saddr || oaddr >= eaddr)
                continue;

            switch (obj->state) {
            case ODEBUG_STATE_ACTIVE:
                descr = obj->descr;
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "free");
                debug_object_fixup(descr->fixup_free,
                           (void *) oaddr, state);
                goto repeat;
            default:
                hlist_del(&obj->node);
                __free_object(obj);
                break;
            }
        }
        raw_spin_unlock_irqrestore(&db->lock, flags);

        if (cnt > debug_objects_maxchain)
            debug_objects_maxchain = cnt;

        objs_checked += cnt;
    }

    if (objs_checked > debug_objects_maxchecked)
        debug_objects_maxchecked = objs_checked;

    /* Schedule work to actually kmem_cache_free() objects */
    if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
        WRITE_ONCE(obj_freeing, true);
        schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
    }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
    if (debug_objects_enabled)
        __debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

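/*
 * Show pool statistics via debugfs ("debug_objects/stats"). The percpu
 * free counts are folded in so that pool_free and pool_used reflect the
 * totals described at the counter definitions above.
 */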
static int debug_stats_show(struct seq_file *m, void *v)
{
    int cpu, obj_percpu_free = 0;

    for_each_possible_cpu(cpu)
        obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

    seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
    seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
    seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
    seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
    seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
    seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
    seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
    seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
    seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
    seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
    seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
    seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
    return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
    struct dentry *dbgdir;

    if (!debug_objects_enabled)
        return 0;

    dbgdir = debugfs_create_dir("debug_objects", NULL);

    debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

    return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
    unsigned long   dummy1[6];
    int     static_init;
    unsigned long   dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
    struct self_test *obj = addr;

    return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
    struct self_test *obj = addr;

    switch (state) {
    case ODEBUG_STATE_ACTIVE:
        debug_object_deactivate(obj, &descr_type_test);
        debug_object_init(obj, &descr_type_test);
        return true;
    default:
        return false;
    }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
    struct self_test *obj = addr;

    switch (state) {
    case ODEBUG_STATE_NOTAVAILABLE:
        return true;
    case ODEBUG_STATE_ACTIVE:
        debug_object_deactivate(obj, &descr_type_test);
        debug_object_activate(obj, &descr_type_test);
        return true;

    default:
        return false;
    }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
    struct self_test *obj = addr;

    switch (state) {
    case ODEBUG_STATE_ACTIVE:
        debug_object_deactivate(obj, &descr_type_test);
        debug_object_destroy(obj, &descr_type_test);
        return true;
    default:
        return false;
    }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
    struct self_test *obj = addr;

    switch (state) {
    case ODEBUG_STATE_ACTIVE:
        debug_object_deactivate(obj, &descr_type_test);
        debug_object_free(obj, &descr_type_test);
        return true;
    default:
        return false;
    }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
    struct debug_bucket *db;
    struct debug_obj *obj;
    unsigned long flags;
    int res = -EINVAL;

    db = get_bucket((unsigned long) addr);

    raw_spin_lock_irqsave(&db->lock, flags);

    obj = lookup_object(addr, db);
    if (!obj && state != ODEBUG_STATE_NONE) {
        WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
        goto out;
    }
    if (obj && obj->state != state) {
        WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
               obj->state, state);
        goto out;
    }
    if (fixups != debug_objects_fixups) {
        WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
               fixups, debug_objects_fixups);
        goto out;
    }
    if (warnings != debug_objects_warnings) {
        WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
               warnings, debug_objects_warnings);
        goto out;
    }
    res = 0;
out:
    raw_spin_unlock_irqrestore(&db->lock, flags);
    if (res)
        debug_objects_enabled = 0;
    return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
    .name           = "selftest",
    .is_static_object   = is_static_object,
    .fixup_init     = fixup_init,
    .fixup_activate     = fixup_activate,
    .fixup_destroy      = fixup_destroy,
    .fixup_free     = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

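/*
 * Boot-time self test: walk the object through its legal state
 * transitions, then deliberately trigger violations (init of a destroyed
 * object, activation of an untracked static object, freeing while
 * active) and verify that the expected fixups and warnings occurred.
 */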
static void __init debug_objects_selftest(void)
{
    int fixups, oldfixups, warnings, oldwarnings;
    unsigned long flags;

    local_irq_save(flags);

    fixups = oldfixups = debug_objects_fixups;
    warnings = oldwarnings = debug_objects_warnings;
    descr_test = &descr_type_test;

    debug_object_init(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
        goto out;
    debug_object_activate(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
        goto out;
    debug_object_activate(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
        goto out;
    debug_object_deactivate(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
        goto out;
    debug_object_destroy(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
        goto out;
    debug_object_init(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
        goto out;
    debug_object_activate(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
        goto out;
    debug_object_deactivate(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
        goto out;
    debug_object_free(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
        goto out;

    obj.static_init = 1;
    debug_object_activate(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
        goto out;
    debug_object_init(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
        goto out;
    debug_object_free(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
        goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
    debug_object_init(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
        goto out;
    debug_object_activate(&obj, &descr_type_test);
    if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
        goto out;
    __debug_check_no_obj_freed(&obj, sizeof(obj));
    if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
        goto out;
#endif
    pr_info("selftest passed\n");

out:
    debug_objects_fixups = oldfixups;
    debug_objects_warnings = oldwarnings;
    descr_test = NULL;

    local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
    int i;

    for (i = 0; i < ODEBUG_HASH_SIZE; i++)
        raw_spin_lock_init(&obj_hash[i].lock);

    for (i = 0; i < ODEBUG_POOL_SIZE; i++)
        hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
    struct debug_bucket *db = obj_hash;
    struct hlist_node *tmp;
    struct debug_obj *obj, *new;
    HLIST_HEAD(objects);
    int i, cnt = 0;

    for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
        obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
        if (!obj)
            goto free;
        hlist_add_head(&obj->node, &objects);
    }

    /*
     * debug_objects_mem_init() is now called early, when only one CPU
     * is up and interrupts are disabled, so it is safe to replace the
     * active object references.
     */

    /* Remove the statically allocated objects from the pool */
    hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
        hlist_del(&obj->node);
    /* Move the allocated objects to the pool */
    hlist_move_list(&objects, &obj_pool);

    /* Replace the active object references */
    for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
        hlist_move_list(&db->list, &objects);

        hlist_for_each_entry(obj, &objects, node) {
            new = hlist_entry(obj_pool.first, typeof(*obj), node);
            hlist_del(&new->node);
            /* copy object data */
            *new = *obj;
            hlist_add_head(&new->node, &db->list);
            cnt++;
        }
    }

    pr_debug("%d of %d active objects replaced\n",
         cnt, obj_pool_used);
    return 0;
free:
    hlist_for_each_entry_safe(obj, tmp, &objects, node) {
        hlist_del(&obj->node);
        kmem_cache_free(obj_cache, obj);
    }
    return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
    int cpu, extras;

    if (!debug_objects_enabled)
        return;

    /*
     * Initialize the percpu object pools
     *
     * Initialization is not strictly necessary, but was done for
     * completeness.
     */
    for_each_possible_cpu(cpu)
        INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

    obj_cache = kmem_cache_create("debug_objects_cache",
                      sizeof (struct debug_obj), 0,
                      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
                      NULL);

    if (!obj_cache || debug_objects_replace_static_objects()) {
        debug_objects_enabled = 0;
        kmem_cache_destroy(obj_cache);
        pr_warn("out of memory.\n");
    } else
        debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
    cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
                    object_cpu_offline);
#endif

    /*
     * Increase the thresholds for allocating and freeing objects
     * according to the number of possible CPUs available in the system.
     */
    extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
    debug_objects_pool_size += extras;
    debug_objects_pool_min_level += extras;
}