/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>

extern struct lockdep_map rcu_trace_lock_map;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline int rcu_read_lock_trace_held(void)
{
	return lock_is_held(&rcu_trace_lock_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
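
/*
 * Illustrative use of rcu_read_lock_trace_held() (a minimal sketch, not part
 * of this header): code that must run under the tracing-RCU reader lock can
 * document that requirement with a lockdep assertion such as
 * RCU_LOCKDEP_WARN() from <linux/rcupdate.h>.  The function and structure
 * names below are hypothetical.
 *
 *	static void do_traced_access(struct foo *p)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(),
 *				 "do_traced_access() needs rcu_read_lock_trace()");
 *		// ... access p under Tasks Trace RCU protection ...
 *	}
 */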

#ifdef CONFIG_TASKS_TRACE_RCU

void rcu_read_unlock_trace_special(struct task_struct *t);

/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on one
 * task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	barrier();
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers
	rcu_lock_acquire(&rcu_trace_lock_map);
}

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map);
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  // We assume shallow reader nesting.
	}
	WARN_ON_ONCE(nesting != 0);
	rcu_read_unlock_trace_special(t);
}
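
/*
 * Illustrative read-side usage (a minimal sketch, not part of this header):
 * a reader brackets its accesses with rcu_read_lock_trace() and
 * rcu_read_unlock_trace().  The pointer and helper names below are
 * hypothetical.
 *
 *	rcu_read_lock_trace();
 *	p = rcu_dereference(gp);	// gp is an RCU-protected pointer.
 *	if (p)
 *		do_something_with(p);	// Safe until the matching unlock.
 *	rcu_read_unlock_trace();
 */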

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
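
/*
 * Illustrative update-side usage (a minimal sketch, not part of this
 * header): an updater unlinks an item from readers' view and defers its
 * freeing with call_rcu_tasks_trace(), or instead blocks in
 * synchronize_rcu_tasks_trace() until all pre-existing readers finish;
 * rcu_barrier_tasks_trace() waits for previously queued callbacks.  The
 * structure and function names below are hypothetical.
 *
 *	static void free_traced_item(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct traced_item, rh));
 *	}
 *
 *	static void remove_traced_item(struct traced_item *p)
 *	{
 *		list_del_rcu(&p->list);				// Unlink from readers' view.
 *		call_rcu_tasks_trace(&p->rh, free_traced_item);	// Free after a grace period.
 *	}
 */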
#else
/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

#endif /* __LINUX_RCUPDATE_TRACE_H */