/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */
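
/*
 * Rough orientation (editor's sketch based on the generic
 * <linux/spinlock.h>/<linux/rwlock.h> wrappers, not on anything in this
 * file): with CONFIG_DEBUG_SPINLOCK enabled, the init macros expand to
 * the *_init() functions here, and the lock/unlock paths funnel into the
 * do_raw_*() hooks below:
 *
 *    spin_lock_init(&lock);    // -> __raw_spin_lock_init()
 *    spin_lock(&lock);         // -> do_raw_spin_lock(): checks + arch lock
 *    spin_unlock(&lock);       // -> do_raw_spin_unlock(): checks + arch unlock
 *
 *    rwlock_init(&rwlock);     // -> __rwlock_init()  (!PREEMPT_RT only)
 *    write_lock(&rwlock);      // -> do_raw_write_lock()
 */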

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
              struct lock_class_key *key, short inner)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    /*
     * Make sure we are not reinitializing a held lock:
     */
    debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
    lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    lock->magic = SPINLOCK_MAGIC;
    lock->owner = SPINLOCK_OWNER_INIT;
    lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
           struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    /*
     * Make sure we are not reinitializing a held lock:
     */
    debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
#endif
    lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
    lock->magic = RWLOCK_MAGIC;
    lock->owner = SPINLOCK_OWNER_INIT;
    lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);
#endif

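/*
 * Dump the diagnostic state for @lock: which check fired, the CPU and
 * task that tripped it, the lock's magic/owner/owner_cpu fields, and a
 * stack trace.
 */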
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
    struct task_struct *owner = READ_ONCE(lock->owner);

    if (owner == SPINLOCK_OWNER_INIT)
        owner = NULL;
    printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
        msg, raw_smp_processor_id(),
        current->comm, task_pid_nr(current));
    printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
            ".owner_cpu: %d\n",
        lock, READ_ONCE(lock->magic),
        owner ? owner->comm : "<none>",
        owner ? task_pid_nr(owner) : -1,
        READ_ONCE(lock->owner_cpu));
    dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
    if (!debug_locks_off())
        return;

    spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

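/*
 * Pre-acquire sanity checks: the lock must look initialized (magic), and
 * neither the current task nor the current CPU may already hold it;
 * either would mean a self-deadlocking recursive acquisition. The fields
 * are read with READ_ONCE() since they are racy until the lock is held.
 */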
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
    SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
    SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
    SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
                            lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
    WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
    WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
    SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
    SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
    SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
    SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                            lock, "wrong CPU");
    WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
    WRITE_ONCE(lock->owner_cpu, -1);
}

/*
 * We now rely on the NMI watchdog to detect lockups instead of doing the
 * detection here with an unfair lock, which can cause problems of its own.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
    debug_spin_lock_before(lock);
    arch_spin_lock(&lock->raw_lock);
    mmiowb_spin_lock();
    debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
    int ret = arch_spin_trylock(&lock->raw_lock);

    if (ret) {
        mmiowb_spin_lock();
        debug_spin_lock_after(lock);
    }
#ifndef CONFIG_SMP
    /*
     * Must not happen on UP:
     */
    SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
    return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
    mmiowb_spin_unlock();
    debug_spin_unlock(lock);
    arch_spin_unlock(&lock->raw_lock);
}

#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
    if (!debug_locks_off())
        return;

    printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
        msg, raw_smp_processor_id(), current->comm,
        task_pid_nr(current), lock);
    dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

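/*
 * Read-side locks can be held by several CPUs at once, so there is no
 * single owner to record; only the magic value can be sanity-checked.
 */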
void do_raw_read_lock(rwlock_t *lock)
{
    RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
    arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
    int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
    /*
     * Must not happen on UP:
     */
    RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
    return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
    RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
    arch_read_unlock(&lock->raw_lock);
}

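/*
 * The write side is exclusive, so it mirrors the spinlock owner tracking:
 * record the current task/CPU on acquire, verify and clear them on release.
 */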
static inline void debug_write_lock_before(rwlock_t *lock)
{
    RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
    RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
    RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                            lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
    WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
    WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
    RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
    RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
    RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                            lock, "wrong CPU");
    WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
    WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
{
    debug_write_lock_before(lock);
    arch_write_lock(&lock->raw_lock);
    debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
    int ret = arch_write_trylock(&lock->raw_lock);

    if (ret)
        debug_write_lock_after(lock);
#ifndef CONFIG_SMP
    /*
     * Must not happen on UP:
     */
    RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
    return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
    debug_write_unlock(lock);
    arch_write_unlock(&lock->raw_lock);
}

#endif /* !CONFIG_PREEMPT_RT */