0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

/*
 * Quiescent-state and context-switch reporting hooks.  Definitions live
 * outside this header; NOTE(review): semantics below are inferred from
 * the names — confirm against the implementation.
 */
void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
0024
0025
0026
0027
0028
0029
0030 static inline void rcu_virt_note_context_switch(int cpu)
0031 {
0032 rcu_note_context_switch(false);
0033 }
0034
/* Expedited grace periods and kvfree_rcu() batching support. */
void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);

/* Callback-completion barrier and miscellaneous state queries. */
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
/*
 * Grace-period polling: obtain a cookie now, then later poll or
 * conditionally wait until a full grace period has elapsed since the
 * cookie was taken ("_expedited" variants use expedited grace periods).
 */
unsigned long start_poll_synchronize_rcu_expedited(void);
void cond_synchronize_rcu_expedited(unsigned long oldstate);
unsigned long get_state_synchronize_rcu(void);
unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu(unsigned long oldstate);

bool rcu_is_idle_cpu(int cpu);
0051
#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
/* Checking is compiled out when CONFIG_PROVE_RCU is not set. */
static inline void rcu_irq_exit_check_preempt(void) { }
#endif
0057
/* Forward declaration so this header need not pull in sched headers. */
struct task_struct;
void rcu_preempt_deferred_qs(struct task_struct *t);

/* Task-exit hook for RCU bookkeeping. */
void exit_rcu(void);

/*
 * Boot/scheduler lifecycle notifications and state queries.
 * NOTE(review): rcu_scheduler_active transitions are defined elsewhere —
 * confirm valid values against the implementation.
 */
void rcu_scheduler_starting(void);
extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
/* Only needed when the kernel itself is non-preemptible. */
void rcu_all_qs(void);
#endif
0071
0072
/*
 * CPU-hotplug entry points.  NOTE(review): presumably invoked from the
 * cpuhp state machine at the corresponding online/offline stages —
 * verify against the registration site.
 */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
0079
0080 #endif