0001
0002 #include <linux/atomic.h>
0003 #include <linux/percpu.h>
0004 #include <linux/wait.h>
0005 #include <linux/lockdep.h>
0006 #include <linux/percpu-rwsem.h>
0007 #include <linux/rcupdate.h>
0008 #include <linux/sched.h>
0009 #include <linux/sched/task.h>
0010 #include <linux/sched/debug.h>
0011 #include <linux/errno.h>
0012 #include <trace/events/lock.h>
0013
0014 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
0015 const char *name, struct lock_class_key *key)
0016 {
0017 sem->read_count = alloc_percpu(int);
0018 if (unlikely(!sem->read_count))
0019 return -ENOMEM;
0020
0021 rcu_sync_init(&sem->rss);
0022 rcuwait_init(&sem->writer);
0023 init_waitqueue_head(&sem->waiters);
0024 atomic_set(&sem->block, 0);
0025 #ifdef CONFIG_DEBUG_LOCK_ALLOC
0026 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
0027 lockdep_init_map(&sem->dep_map, name, key, 0);
0028 #endif
0029 return 0;
0030 }
0031 EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
0032
0033 void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
0034 {
0035
0036
0037
0038
0039 if (!sem->read_count)
0040 return;
0041
0042 rcu_sync_dtor(&sem->rss);
0043 free_percpu(sem->read_count);
0044 sem->read_count = NULL;
0045 }
0046 EXPORT_SYMBOL_GPL(percpu_free_rwsem);
0047
/*
 * Reader fast-path trylock. Must be called with preemption disabled
 * (see __percpu_rwsem_trylock() and __percpu_down_read()), so that on
 * failure the decrement below happens on the same CPU as the increment,
 * avoiding an increment-on-one-CPU / decrement-on-another imbalance.
 *
 * Returns true with the read lock held, false if a writer holds or is
 * taking the lock.
 */
static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	this_cpu_inc(*sem->read_count);

	/*
	 * Full barrier between the read_count increment above and the
	 * sem->block load below; pairs with the smp_mb() in
	 * readers_active_check(). Either we observe the writer's
	 * sem->block store (and back out below), or the writer's
	 * per_cpu_sum() observes our increment and waits for us.
	 */
	smp_mb();

	/*
	 * If !sem->block the critical section starts here. The acquire
	 * pairs with the atomic_set_release() in percpu_up_write(), so a
	 * reader that sees the lock released also sees the writer's
	 * critical section complete.
	 */
	if (likely(!atomic_read_acquire(&sem->block)))
		return true;

	this_cpu_dec(*sem->read_count);

	/* Prod any writer blocked in rcuwait_wait_event() to re-check
	 * readers_active_check(), since it may have counted us above. */
	rcuwait_wake_up(&sem->writer);

	return false;
}
0083
0084 static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
0085 {
0086 if (atomic_read(&sem->block))
0087 return false;
0088
0089 return atomic_xchg(&sem->block, 1) == 0;
0090 }
0091
0092 static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
0093 {
0094 if (reader) {
0095 bool ret;
0096
0097 preempt_disable();
0098 ret = __percpu_down_read_trylock(sem);
0099 preempt_enable();
0100
0101 return ret;
0102 }
0103 return __percpu_down_write_trylock(sem);
0104 }
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
/*
 * Custom wait-queue wake function. WQ_FLAG_CUSTOM (set in
 * percpu_rwsem_wait()) marks the entry as a reader. The lock is
 * acquired ON BEHALF of the waiter here, before it is woken; if the
 * trylock fails (lock stolen concurrently) we return nonzero to stop
 * this wakeup pass and leave the waiter queued.
 *
 * The return value feeds __wake_up(): returning !reader means a woken
 * writer consumes the single exclusive wakeup slot (see the
 * __wake_up(..., 1, sem) in percpu_up_write()), while woken readers do
 * not, so a batch of queued readers can be woken together — presumably
 * to preserve FIFO fairness between readers and writers.
 */
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
				      unsigned int mode, int wake_flags,
				      void *key)
{
	bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
	struct percpu_rw_semaphore *sem = key;
	struct task_struct *p;

	/* concurrent against percpu_down_write(), can get stolen */
	if (!__percpu_rwsem_trylock(sem, reader))
		return 1;

	/*
	 * Take a reference before clearing ->private: once the waiter
	 * observes ->private == NULL it may return and its stack-based
	 * wq_entry (and task) may go away. The release pairs with the
	 * smp_load_acquire() in percpu_rwsem_wait(), ordering the
	 * list_del_init() before the waiter can see NULL.
	 */
	p = get_task_struct(wq_entry->private);
	list_del_init(&wq_entry->entry);
	smp_store_release(&wq_entry->private, NULL);

	wake_up_process(p);
	put_task_struct(p);

	return !reader;
}
0140
/*
 * Slow path: queue ourselves on sem->waiters and sleep until
 * percpu_rwsem_wake_function() hands us the lock. On return the lock
 * is held (in the mode given by @reader). Sleeps, so callers must be
 * able to block.
 */
static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
	DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
	bool wait;

	spin_lock_irq(&sem->waiters.lock);
	/*
	 * Trylock under waiters.lock to serialize against the wakeup in
	 * percpu_up_write(): if we fail the trylock, the wakeup is
	 * guaranteed to see us on the list.
	 */
	wait = !__percpu_rwsem_trylock(sem, reader);
	if (wait) {
		/* WQ_FLAG_CUSTOM tags a reader entry; multiplying by the
		 * bool adds the flag only when reader is true. */
		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
	}
	spin_unlock_irq(&sem->waiters.lock);

	while (wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/*
		 * ->private cleared means the wake function acquired the
		 * lock for us. The acquire pairs with the
		 * smp_store_release() there, so the entry is off the
		 * list before we can return and release this stack frame.
		 */
		if (!smp_load_acquire(&wq_entry.private))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
0166
/*
 * Reader slow path, entered from the percpu_down_read() fast path when
 * a writer is active or pending. Called with preemption disabled (note
 * the preempt_enable()/preempt_disable() bracketing below) and returns
 * with preemption still disabled.
 *
 * @try: if true, never sleep — return false instead of waiting.
 * Returns true with the read lock held, false only when @try failed.
 */
bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_READ);
	/* Re-enable preemption only across the sleep in percpu_rwsem_wait(). */
	preempt_enable();
	percpu_rwsem_wait(sem, true);
	preempt_disable();
	trace_contention_end(sem, 0);

	return true;
}
0183 EXPORT_SYMBOL_GPL(__percpu_down_read);
0184
/*
 * Sum a per-CPU variable across all possible CPUs. The compile-time
 * assert restricts this to types whose loads are atomic, since each
 * per_cpu() read races with that CPU updating its own counter.
 */
#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})
0194
0195
0196
0197
0198
0199
0200
0201
0202
/*
 * Writer-side check: return true iff no readers hold the lock.
 *
 * The read_count per-CPU counters can transiently go negative on one
 * CPU and positive on another (a reader can acquire on one CPU and
 * release on another), so only the total sum is meaningful; it is zero
 * exactly when no reader is inside its critical section.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * Pairs with the smp_mb() in __percpu_down_read_trylock(): having
	 * observed the readers' decrements, ensure we also observe their
	 * entire critical sections before the writer proceeds.
	 */
	smp_mb();

	return true;
}
0217
/*
 * Acquire the semaphore for writing. Sleeps until both writer-writer
 * exclusion (sem->block) is obtained and all active readers have
 * drained. Released by percpu_up_write().
 */
void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE);

	/* Switch readers onto the slow path (rcu_sync state machine). */
	rcu_sync_enter(&sem->rss);

	/*
	 * Setting sem->block provides writer-writer exclusion and makes
	 * new readers fail __percpu_down_read_trylock(); if another
	 * writer holds it, queue up in percpu_rwsem_wait().
	 */
	if (!__percpu_down_write_trylock(sem))
		percpu_rwsem_wait(sem, /* .reader = */ false);

	/*
	 * Readers that did not see our sem->block store are guaranteed
	 * (by the smp_mb() pairing in __percpu_down_read_trylock() /
	 * readers_active_check()) to have their read_count increment
	 * visible to the sum below, so we wait for them here.
	 */

	/* Wait for all pre-existing readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
	trace_contention_end(sem, 0);
}
0246 EXPORT_SYMBOL_GPL(percpu_down_write);
0247
/*
 * Release the write lock taken by percpu_down_write() and wake the
 * first queued waiter, if any.
 */
void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/*
	 * Release sem->block: the release ordering pairs with the
	 * atomic_read_acquire() in __percpu_down_read_trylock(), so a
	 * reader that observes the lock free also observes our whole
	 * critical section. After this store new readers can succeed on
	 * the fast path.
	 */
	atomic_set_release(&sem->block, 0);

	/*
	 * Pass one exclusive wakeup slot to the wake function; it hands
	 * the lock directly to the first waiter (and, for readers,
	 * continues down the queue — see percpu_rwsem_wake_function()).
	 */
	__wake_up(&sem->waiters, TASK_NORMAL, 1, sem);

	/* Let the rcu_sync state machine wind down so future reader
	 * acquisitions can use the fast path again. */
	rcu_sync_exit(&sem->rss);
}
0276 EXPORT_SYMBOL_GPL(percpu_up_write);