/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *  tiny variant.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#ifndef _LINUX_SRCU_TINY_H
#define _LINUX_SRCU_TINY_H

#include <linux/swait.h>

struct srcu_struct {
    short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
    unsigned short srcu_idx;    /* Current reader array element in bit 0x2. */
    unsigned short srcu_idx_max;    /* Furthest future srcu_idx request. */
    u8 srcu_gp_running;     /* GP workqueue running? */
    u8 srcu_gp_waiting;     /* GP waiting for readers? */
    struct swait_queue_head srcu_wq;
                    /* Last srcu_read_unlock() wakes GP. */
    struct rcu_head *srcu_cb_head;  /* Pending callbacks: Head. */
    struct rcu_head **srcu_cb_tail; /* Pending callbacks: Tail. */
    struct work_struct srcu_work;   /* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
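
/*
 * An srcu_struct can also be set up at run time; a minimal sketch, assuming
 * init_srcu_struct() and cleanup_srcu_struct() from <linux/srcu.h> and a
 * hypothetical "my_srcu" instance (illustration only, disabled):
 */
#if 0
static struct srcu_struct my_srcu;

static int my_setup(void)
{
    return init_srcu_struct(&my_srcu); /* Prepare wait queue, callback list, ... */
}

static void my_teardown(void)
{
    cleanup_srcu_struct(&my_srcu);     /* All callbacks must have completed. */
}
#endif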

void srcu_drive_gp(struct work_struct *wp);

#define __SRCU_STRUCT_INIT(name, __ignored)             \
{                                   \
    .srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),    \
    .srcu_cb_tail = &name.srcu_cb_head,             \
    .srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \
    __SRCU_DEP_MAP_INIT(name)                   \
}

/*
 * This odd _STATIC_ arrangement is needed for API compatibility with
 * Tree SRCU, which needs some per-CPU data.
 */
#define DEFINE_SRCU(name) \
    struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
#define DEFINE_STATIC_SRCU(name) \
    static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
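
/*
 * Minimal usage sketch (illustration only, disabled): a statically defined
 * domain with one reader and one updater.  The srcu_read_lock(),
 * srcu_read_unlock(), and srcu_dereference() wrappers come from
 * <linux/srcu.h>, rcu_assign_pointer() from <linux/rcupdate.h>, and kfree()
 * from <linux/slab.h>; "my_srcu", "gp", and "struct foo" are hypothetical
 * names used only for this sketch.
 */
#if 0
DEFINE_STATIC_SRCU(my_srcu);
static struct foo __rcu *gp;

static int reader(void)
{
    int idx, val;

    idx = srcu_read_lock(&my_srcu);          /* Enter read-side section. */
    val = srcu_dereference(gp, &my_srcu)->field;
    srcu_read_unlock(&my_srcu, idx);         /* Pass back the same idx. */
    return val;
}

static void updater(struct foo *newp)
{
    struct foo *oldp;

    oldp = rcu_dereference_protected(gp, 1); /* Caller serializes updates. */
    rcu_assign_pointer(gp, newp);
    synchronize_srcu(&my_srcu);              /* Wait for pre-existing readers. */
    kfree(oldp);
}
#endif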

void synchronize_srcu(struct srcu_struct *ssp);

/*
 * Counts the new reader in the appropriate srcu_lock_nesting[] element of
 * the srcu_struct.  Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance.  Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
    int idx;

    idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
    WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
    return idx;
}
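
/*
 * The index expression above extracts bit 0x2 of (->srcu_idx + 1), so for
 * srcu_idx values 0, 1, 2, 3 (mod 4) it selects srcu_lock_nesting[] slot
 * 0, 1, 1, 0 respectively.  Below is a sketch of the matching unlock side,
 * based only on the fields declared above; the real __srcu_read_unlock()
 * lives in kernel/rcu/srcutiny.c (illustration only, disabled):
 */
#if 0
static inline void __srcu_read_unlock_sketch(struct srcu_struct *ssp, int idx)
{
    int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;

    WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
    if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
        swake_up_one(&ssp->srcu_wq); /* Last reader wakes the GP waiter. */
}
#endif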

static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
    synchronize_srcu(ssp);
}

static inline void srcu_barrier(struct srcu_struct *ssp)
{
    synchronize_srcu(ssp);
}

/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
                        char *tt, char *tf)
{
    int idx;

    idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
    pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
         tt, tf, idx,
         data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
         data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])));
}

#endif /* _LINUX_SRCU_TINY_H */