/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;		/* tracks whether the reader fast path is enabled */
	unsigned int __percpu	*read_count;	/* per-CPU count of active readers */
	struct rcuwait		writer;		/* writer waits here for readers to drain */
	wait_queue_head_t	waiters;	/* slowpath waitqueue for readers and writers */
	atomic_t		block;		/* nonzero while a writer holds or is taking the lock */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	.waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),	\
	.block = ATOMIC_INIT(0),					\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}

#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)
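
/*
 * Usage sketch (illustrative only; "foo_rwsem" and "bar_rwsem" are
 * hypothetical names, not part of this header):
 *
 *	DEFINE_PERCPU_RWSEM(foo_rwsem);		- global definition
 *	DEFINE_STATIC_PERCPU_RWSEM(bar_rwsem);	- file-local definition
 *
 * Either form emits the struct percpu_rw_semaphore together with its
 * per-CPU reader counter, fully initialized at compile time.
 */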

extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both switch sem->rss out of the reader fast path and start
	 * checking counters while we are here. So if we see the fast path
	 * still enabled (rcu_sync_is_idle()), we know that the writer won't
	 * be checking until we're past the preempt_enable() and that once
	 * the synchronize_rcu() is done, the writer will see anything we
	 * did within this RCU-sched read-side critical section.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}
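
/*
 * Reader-side sketch (illustrative; foo_rwsem and the state it protects
 * are hypothetical). In the common case this is only a preemption
 * toggle around a per-CPU increment, with no shared-cacheline traffic:
 *
 *	percpu_down_read(&foo_rwsem);
 *	v = foo_shared_state;
 *	percpu_up_read(&foo_rwsem);
 *
 * Note the might_sleep() above: percpu_down_read() may block behind a
 * writer and must not be used from atomic context.
 */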

static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
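
/*
 * Trylock sketch (illustrative). Unlike percpu_down_read(), the trylock
 * variant never sleeps: if a writer has disabled the fast path and the
 * lock cannot be taken immediately, it returns false instead:
 *
 *	if (percpu_down_read_trylock(&foo_rwsem)) {
 *		... read-side critical section ...
 *		percpu_up_read(&foo_rwsem);
 *	} else {
 *		... back off and retry later ...
 *	}
 */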

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * slowpath; reader will only ever wake a single blocked
		 * writer.
		 */
		smp_mb(); /* B matches C */
		/*
		 * In other words, if the writer sees our decrement
		 * (presumably to aggregate zero, as that is the only time
		 * it matters) it will also see our critical section.
		 */
		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
}

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
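
/*
 * Writer-side sketch (illustrative). percpu_down_write() switches
 * readers to the slow path, waits for all active readers to drain, and
 * then holds the lock exclusively. It is far more expensive than the
 * read side and is expected to be called rarely:
 *
 *	percpu_down_write(&foo_rwsem);
 *	... exclusive access to the protected state ...
 *	percpu_up_write(&foo_rwsem);
 */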

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;			\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})
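
/*
 * Dynamic-initialization sketch (illustrative; "obj" is hypothetical).
 * percpu_init_rwsem() allocates the per-CPU reader counter and can
 * therefore fail; the counter must eventually be released with
 * percpu_free_rwsem():
 *
 *	if (percpu_init_rwsem(&obj->rwsem))
 *		return -ENOMEM;
 *	...
 *	percpu_free_rwsem(&obj->rwsem);
 */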

#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)
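
/*
 * Illustrative only: a helper that relies on its caller holding the
 * semaphore can document (and, under lockdep, verify) that requirement:
 *
 *	static void foo_update_locked(void)
 *	{
 *		percpu_rwsem_assert_held(&foo_rwsem);
 *		... modify state protected by foo_rwsem ...
 *	}
 */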

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
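
/*
 * These two helpers only hand lockdep ownership back and forth; neither
 * touches the lock itself. One pattern (loosely modeled on filesystem
 * freezing, which keeps a percpu rwsem write-held across a return to
 * userspace) is:
 *
 *	percpu_down_write(&foo_rwsem);
 *	percpu_rwsem_release(&foo_rwsem, false, _RET_IP_);
 *	... the lock remains write-held, but lockdep no longer
 *	    considers this task its owner ...
 *	percpu_rwsem_acquire(&foo_rwsem, false, _RET_IP_);
 *	percpu_up_write(&foo_rwsem);
 */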

#endif