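/*
 * RT-specific reader/writer semaphore and reader/writer lock base code,
 * shared by the PREEMPT_RT variants of rw_semaphore and rwlock_t.
 *
 * The lock word rwb->readers encodes the complete state:
 *
 *   READER_BIAS        unlocked, readers use the atomic fast path
 *   READER_BIAS + N    N readers hold the lock
 *   N (N >= 0)         a writer removed READER_BIAS and waits for the
 *                      remaining N readers to leave their critical sections
 *   WRITER_BIAS        write locked
 *
 * Writers serialize on rwb->rtmutex and are therefore subject to the
 * rtmutex priority inheritance machinery. The reader fast path consists of
 * plain atomic operations, so those atomics have to provide the ACQUIRE and
 * RELEASE ordering of the lock themselves.
 *
 * Readers are still admitted in __rwbase_read_lock() while a writer waits
 * for the reader count to drain, so these locks are not writer fair.
 *
 * The rwbase_rtmutex_*() and related helpers used below are supplied by the
 * code which includes this file.
 */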
static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
	int r;

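	/*
	 * Increment the reader count only as long as rwb->readers is
	 * negative, i.e. READER_BIAS is still set and no writer owns the
	 * lock word. The acquire ordering pairs with the release in
	 * __rwbase_write_unlock().
	 */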
	for (r = atomic_read(&rwb->readers); r < 0;) {
		if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
			return 1;
	}
	return 0;
}

static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
				      unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	int ret;

	raw_spin_lock_irq(&rtm->wait_lock);
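	/*
	 * Allow readers as long as the writer has not completely acquired
	 * the semaphore for write, i.e. rwb->readers is not yet WRITER_BIAS.
	 */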
	if (atomic_read(&rwb->readers) != WRITER_BIAS) {
		atomic_inc(&rwb->readers);
		raw_spin_unlock_irq(&rtm->wait_lock);
		return 0;
	}

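	/*
	 * Call into the slow lock path with rtmutex->wait_lock held. If the
	 * lock were dropped before blocking on the rtmutex, the writer could
	 * release and re-acquire the write lock in the meantime, and this
	 * reader would then be queued on the rtmutex behind a writer which
	 * is itself waiting for other readers to leave their critical
	 * sections, i.e. for a potentially unbounded time.
	 */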
	trace_contention_begin(rwb, LCB_F_RT | LCB_F_READ);

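	/*
	 * For rwlocks this call returns 0 unconditionally, so the !ret
	 * checks below are optimized out in that case.
	 */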
	ret = rwbase_rtmutex_slowlock_locked(rtm, state);

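	/*
	 * On success the rtmutex is held, so there cannot be a writer
	 * active. Increment the reader count and immediately drop the
	 * rtmutex again. rtmutex->wait_lock has to be unlocked in any case.
	 */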
	if (!ret)
		atomic_inc(&rwb->readers);
	raw_spin_unlock_irq(&rtm->wait_lock);
	if (!ret)
		rwbase_rtmutex_unlock(rtm);

	trace_contention_end(rwb, ret);
	return ret;
}

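/*
 * Reader lock entry point: try the atomic fast path first, fall back to the
 * rtmutex based slow path once a writer has removed READER_BIAS.
 */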
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
					    unsigned int state)
{
	if (rwbase_read_trylock(rwb))
		return 0;

	return __rwbase_read_lock(rwb, state);
}

static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
					 unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	struct task_struct *owner;
	DEFINE_RT_WAKE_Q(wqh);

	raw_spin_lock_irq(&rtm->wait_lock);
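	/*
	 * Wake the writer, i.e. the rtmutex owner. It might release the
	 * rtmutex concurrently in the fast path (due to a signal), but to
	 * clean up rwb->readers it needs to acquire rtm->wait_lock. The
	 * worst case which can happen is a spurious wakeup.
	 */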
	owner = rt_mutex_owner(rtm);
	if (owner)
		rt_mutex_wake_q_add_task(&wqh, owner, state);

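	/* Pairs with the preempt_enable() in rt_mutex_wake_up_q() */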
	preempt_disable();
	raw_spin_unlock_irq(&rtm->wait_lock);
	rt_mutex_wake_up_q(&wqh);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
					       unsigned int state)
{
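	/*
	 * rwb->readers can only hit 0 when a writer has removed READER_BIAS
	 * and is waiting for the active readers to leave their critical
	 * sections. atomic_dec_and_test() is fully ordered and thus provides
	 * the required RELEASE semantics for the fast path unlock.
	 */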
	if (unlikely(atomic_dec_and_test(&rwb->readers)))
		__rwbase_read_unlock(rwb, state);
}

static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
					 unsigned long flags)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;

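	/*
	 * Add READER_BIAS - bias back to the lock word. The release ordering
	 * pairs with the atomic_try_cmpxchg_acquire() in
	 * rwbase_read_trylock() so that a fast path reader observes the
	 * writer's critical section.
	 */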
	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_rtmutex_unlock(rtm);
}

static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
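	/* Release the write lock and account current as a reader */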
	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}

static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
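	/* Can do without a cmpxchg because this is serialized by wait_lock. */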
	lockdep_assert_held(&rwb->rtmutex.wait_lock);

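	/*
	 * The acquire ordering pairs with the release provided by the final
	 * reader decrement in rwbase_read_unlock(): once readers is observed
	 * as zero, all reader critical sections have completed.
	 */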
	if (!atomic_read_acquire(&rwb->readers)) {
		atomic_set(&rwb->readers, WRITER_BIAS);
		return 1;
	}

	return 0;
}

static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
				     unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

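	/* Take the rtmutex as a first step */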
	if (rwbase_rtmutex_lock_state(rtm, state))
		return -EINTR;

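	/* Remove READER_BIAS to force readers into the slow path */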
	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb))
		goto out_unlock;

	rwbase_set_and_save_current_state(state);
	trace_contention_begin(rwb, LCB_F_RT | LCB_F_WRITE);
	for (;;) {
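		/* The signal check is optimized out for rwlocks */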
		if (rwbase_signal_pending_state(state, current)) {
			rwbase_restore_current_state();
			__rwbase_write_unlock(rwb, 0, flags);
			trace_contention_end(rwb, -EINTR);
			return -EINTR;
		}

		if (__rwbase_write_trylock(rwb))
			break;

		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		rwbase_schedule();
		raw_spin_lock_irqsave(&rtm->wait_lock, flags);

		set_current_state(state);
	}
	rwbase_restore_current_state();
	trace_contention_end(rwb, 0);

out_unlock:
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	return 0;
}

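/*
 * Opportunistic write lock: trylock the rtmutex, remove READER_BIAS and
 * succeed only when no readers are active. On failure the bias and the
 * rtmutex are restored via __rwbase_write_unlock(rwb, 0, flags).
 */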
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	if (!rwbase_rtmutex_trylock(rtm))
		return 0;

	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb)) {
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		return 1;
	}
	__rwbase_write_unlock(rwb, 0, flags);
	return 0;
}