0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef __LINUX_SEQLOCK_H
0003 #define __LINUX_SEQLOCK_H
0004 
0005 /*
0006  * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
0007  * lockless readers (read-only retry loops), and no writer starvation.
0008  *
0009  * See Documentation/locking/seqlock.rst
0010  *
0011  * Copyrights:
0012  * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
0013  * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
0014  */
0015 
0016 #include <linux/compiler.h>
0017 #include <linux/kcsan-checks.h>
0018 #include <linux/lockdep.h>
0019 #include <linux/mutex.h>
0020 #include <linux/preempt.h>
0021 #include <linux/spinlock.h>
0022 
0023 #include <asm/processor.h>
0024 
0025 /*
0026  * The seqlock seqcount_t interface does not prescribe a precise sequence of
0027  * read begin/retry/end. For readers, typically there is a call to
0028  * read_seqcount_begin() and read_seqcount_retry(); however, there are more
0029  * esoteric cases which do not follow this pattern.
0030  *
0031  * As a consequence, we take the following best-effort approach for raw usage
0032  * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
0033  * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
0034  * atomics; if there is a matching read_seqcount_retry() call, no following
0035  * memory operations are considered atomic. Usage of the seqlock_t interface
0036  * is not affected.
0037  */
0038 #define KCSAN_SEQLOCK_REGION_MAX 1000
0039 
0040 /*
0041  * Sequence counters (seqcount_t)
0042  *
0043  * This is the raw counting mechanism, without any writer protection.
0044  *
0045  * Write side critical sections must be serialized and non-preemptible.
0046  *
0047  * If readers can be invoked from hardirq or softirq contexts,
0048  * interrupts or bottom halves must also be respectively disabled before
0049  * entering the write section.
0050  *
0051  * This mechanism can't be used if the protected data contains pointers,
0052  * as the writer can invalidate a pointer that a reader is following.
0053  *
0054  * If the write serialization mechanism is one of the common kernel
0055  * locking primitives, use a sequence counter with associated lock
0056  * (seqcount_LOCKNAME_t) instead.
0057  *
0058  * If it's desired to automatically handle the sequence counter writer
0059  * serialization and non-preemptibility requirements, use a sequential
0060  * lock (seqlock_t) instead.
0061  *
0062  * See Documentation/locking/seqlock.rst
0063  */
0064 typedef struct seqcount {
0065     unsigned sequence;
0066 #ifdef CONFIG_DEBUG_LOCK_ALLOC
0067     struct lockdep_map dep_map;
0068 #endif
0069 } seqcount_t;
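
/*
 * Illustrative usage sketch: the canonical reader/writer pairing for a
 * plain seqcount_t. The "foo" names below are hypothetical. Where the
 * writer is serialized by a spinlock as in this sketch, real code should
 * normally prefer the seqcount_spinlock_t variant defined further down;
 * a plain seqcount_t is shown here only to illustrate the raw API,
 * assuming the write side also meets the non-preemptibility requirement
 * described above::
 *
 *  static struct foo foo_data;
 *  static DEFINE_SPINLOCK(foo_lock);
 *  static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *  void foo_update(const struct foo *new)
 *  {
 *      spin_lock(&foo_lock);            // external writer serialization
 *      write_seqcount_begin(&foo_seq);
 *      foo_data = *new;                 // payload must not contain pointers
 *      write_seqcount_end(&foo_seq);
 *      spin_unlock(&foo_lock);
 *  }
 *
 *  struct foo foo_read(void)
 *  {
 *      struct foo ret;
 *      unsigned seq;
 *
 *      do {
 *          seq = read_seqcount_begin(&foo_seq);
 *          ret = foo_data;
 *      } while (read_seqcount_retry(&foo_seq, seq));
 *
 *      return ret;
 *  }
 */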
0070 
0071 static inline void __seqcount_init(seqcount_t *s, const char *name,
0072                       struct lock_class_key *key)
0073 {
0074     /*
0075      * Make sure we are not reinitializing a held lock:
0076      */
0077     lockdep_init_map(&s->dep_map, name, key, 0);
0078     s->sequence = 0;
0079 }
0080 
0081 #ifdef CONFIG_DEBUG_LOCK_ALLOC
0082 
0083 # define SEQCOUNT_DEP_MAP_INIT(lockname)                \
0084         .dep_map = { .name = #lockname }
0085 
0086 /**
0087  * seqcount_init() - runtime initializer for seqcount_t
0088  * @s: Pointer to the seqcount_t instance
0089  */
0090 # define seqcount_init(s)                       \
0091     do {                                \
0092         static struct lock_class_key __key;         \
0093         __seqcount_init((s), #s, &__key);           \
0094     } while (0)
0095 
0096 static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
0097 {
0098     seqcount_t *l = (seqcount_t *)s;
0099     unsigned long flags;
0100 
0101     local_irq_save(flags);
0102     seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
0103     seqcount_release(&l->dep_map, _RET_IP_);
0104     local_irq_restore(flags);
0105 }
0106 
0107 #else
0108 # define SEQCOUNT_DEP_MAP_INIT(lockname)
0109 # define seqcount_init(s) __seqcount_init(s, NULL, NULL)
0110 # define seqcount_lockdep_reader_access(x)
0111 #endif
0112 
0113 /**
0114  * SEQCNT_ZERO() - static initializer for seqcount_t
0115  * @name: Name of the seqcount_t instance
0116  */
0117 #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
0118 
0119 /*
0120  * Sequence counters with associated locks (seqcount_LOCKNAME_t)
0121  *
0122  * A sequence counter which associates the lock used for writer
0123  * serialization at initialization time. This enables lockdep to validate
0124  * that the write side critical section is properly serialized.
0125  *
0126  * For associated locks which do not implicitly disable preemption,
0127  * preemption protection is enforced in the write side function.
0128  *
0129  * Lockdep is never used in any of the raw write variants.
0130  *
0131  * See Documentation/locking/seqlock.rst
0132  */
0133 
0134 /*
0135  * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
0136  * disable preemption. Disabling preemption can lead to higher latencies, and
0137  * the write side sections will not be able to acquire locks which become
0138  * sleeping locks (e.g. spinlock_t).
0139  *
0140  * To remain preemptible while avoiding a possible livelock caused by the
0141  * reader preempting the writer, use a different technique: let the reader
0142  * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
0143  * case, acquire then release the associated LOCKNAME writer serialization
0144  * lock. This will allow any possibly-preempted writer to make progress
0145  * until the end of its writer serialization lock critical section.
0146  *
0147  * This lock-unlock technique must be implemented for all PREEMPT_RT
0148  * sleeping locks.  See Documentation/locking/locktypes.rst
0149  */
0150 #if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
0151 #define __SEQ_LOCK(expr)    expr
0152 #else
0153 #define __SEQ_LOCK(expr)
0154 #endif
0155 
0156 /*
0157  * typedef seqcount_LOCKNAME_t - sequence counter with associated LOCKNAME lock
0158  * @seqcount:   The real sequence counter
0159  * @lock:   Pointer to the associated lock
0160  *
0161  * A plain sequence counter with external writer synchronization by
0162  * LOCKNAME @lock. The lock is associated to the sequence counter in the
0163  * static initializer or init function. This enables lockdep to validate
0164  * that the write side critical section is properly serialized.
0165  *
0166  * LOCKNAME:    raw_spinlock, spinlock, rwlock or mutex
0167  */
0168 
0169 /*
0170  * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
0171  * @s:      Pointer to the seqcount_LOCKNAME_t instance
0172  * @lock:   Pointer to the associated lock
0173  */
0174 
0175 #define seqcount_LOCKNAME_init(s, _lock, lockname)          \
0176     do {                                \
0177         seqcount_##lockname##_t *____s = (s);           \
0178         seqcount_init(&____s->seqcount);            \
0179         __SEQ_LOCK(____s->lock = (_lock));          \
0180     } while (0)
0181 
0182 #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
0183 #define seqcount_spinlock_init(s, lock)     seqcount_LOCKNAME_init(s, lock, spinlock)
0184 #define seqcount_rwlock_init(s, lock)       seqcount_LOCKNAME_init(s, lock, rwlock)
0185 #define seqcount_mutex_init(s, lock)        seqcount_LOCKNAME_init(s, lock, mutex)
0186 
0187 /*
0188  * SEQCOUNT_LOCKNAME()  - Instantiate seqcount_LOCKNAME_t and helpers
0189  * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
0190  *
0191  * @lockname:       "LOCKNAME" part of seqcount_LOCKNAME_t
0192  * @locktype:       LOCKNAME canonical C data type
0193  * @preemptible:    preemptibility of above locktype
0194  * @lockmember:     argument for lockdep_assert_held()
0195  * @lockbase:       associated lock release function (prefix only)
0196  * @lock_acquire:   associated lock acquisition function (full call)
0197  */
0198 #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
0199 typedef struct seqcount_##lockname {                    \
0200     seqcount_t      seqcount;               \
0201     __SEQ_LOCK(locktype *lock);                 \
0202 } seqcount_##lockname##_t;                      \
0203                                     \
0204 static __always_inline seqcount_t *                 \
0205 __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)          \
0206 {                                   \
0207     return &s->seqcount;                        \
0208 }                                   \
0209                                     \
0210 static __always_inline unsigned                     \
0211 __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)   \
0212 {                                   \
0213     unsigned seq = READ_ONCE(s->seqcount.sequence);         \
0214                                     \
0215     if (!IS_ENABLED(CONFIG_PREEMPT_RT))             \
0216         return seq;                     \
0217                                     \
0218     if (preemptible && unlikely(seq & 1)) {             \
0219         __SEQ_LOCK(lock_acquire);               \
0220         __SEQ_LOCK(lockbase##_unlock(s->lock));         \
0221                                     \
0222         /*                          \
0223          * Re-read the sequence counter since the (possibly \
0224          * preempted) writer made progress.         \
0225          */                         \
0226         seq = READ_ONCE(s->seqcount.sequence);          \
0227     }                               \
0228                                     \
0229     return seq;                         \
0230 }                                   \
0231                                     \
0232 static __always_inline bool                     \
0233 __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)    \
0234 {                                   \
0235     if (!IS_ENABLED(CONFIG_PREEMPT_RT))             \
0236         return preemptible;                 \
0237                                     \
0238     /* PREEMPT_RT relies on the above LOCK+UNLOCK */        \
0239     return false;                           \
0240 }                                   \
0241                                     \
0242 static __always_inline void                     \
0243 __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)     \
0244 {                                   \
0245     __SEQ_LOCK(lockdep_assert_held(lockmember));            \
0246 }
0247 
0248 /*
0249  * __seqprop() for seqcount_t
0250  */
0251 
0252 static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
0253 {
0254     return s;
0255 }
0256 
0257 static inline unsigned __seqprop_sequence(const seqcount_t *s)
0258 {
0259     return READ_ONCE(s->sequence);
0260 }
0261 
0262 static inline bool __seqprop_preemptible(const seqcount_t *s)
0263 {
0264     return false;
0265 }
0266 
0267 static inline void __seqprop_assert(const seqcount_t *s)
0268 {
0269     lockdep_assert_preemption_disabled();
0270 }
0271 
0272 #define __SEQ_RT    IS_ENABLED(CONFIG_PREEMPT_RT)
0273 
0274 SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    s->lock,        raw_spin, raw_spin_lock(s->lock))
0275 SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, s->lock,        spin,     spin_lock(s->lock))
0276 SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, s->lock,        read,     read_lock(s->lock))
0277 SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     s->lock,        mutex,    mutex_lock(s->lock))
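
/*
 * Illustrative sketch of one of the associated-lock variants generated
 * above ("bar" names are hypothetical). With seqcount_spinlock_t, lockdep
 * can check that the associated lock is held on the write side, and the
 * PREEMPT_RT lock-unlock technique described above is handled internally::
 *
 *  static spinlock_t bar_lock;
 *  static seqcount_spinlock_t bar_seq;
 *
 *  void bar_init(void)
 *  {
 *      spin_lock_init(&bar_lock);
 *      seqcount_spinlock_init(&bar_seq, &bar_lock);
 *  }
 *
 *  void bar_update(void)
 *  {
 *      spin_lock(&bar_lock);
 *      write_seqcount_begin(&bar_seq);   // lockdep asserts bar_lock is held
 *      // ... update the data protected by bar_seq ...
 *      write_seqcount_end(&bar_seq);
 *      spin_unlock(&bar_lock);
 *  }
 */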
0278 
0279 /*
0280  * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
0281  * @name:   Name of the seqcount_LOCKNAME_t instance
0282  * @lock:   Pointer to the associated LOCKNAME
0283  */
0284 
0285 #define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {          \
0286     .seqcount       = SEQCNT_ZERO(seq_name.seqcount),   \
0287     __SEQ_LOCK(.lock    = (assoc_lock))             \
0288 }
0289 
0290 #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)    SEQCOUNT_LOCKNAME_ZERO(name, lock)
0291 #define SEQCNT_SPINLOCK_ZERO(name, lock)    SEQCOUNT_LOCKNAME_ZERO(name, lock)
0292 #define SEQCNT_RWLOCK_ZERO(name, lock)      SEQCOUNT_LOCKNAME_ZERO(name, lock)
0293 #define SEQCNT_MUTEX_ZERO(name, lock)       SEQCOUNT_LOCKNAME_ZERO(name, lock)
0294 #define SEQCNT_WW_MUTEX_ZERO(name, lock)    SEQCOUNT_LOCKNAME_ZERO(name, lock)
0295 
0296 #define __seqprop_case(s, lockname, prop)               \
0297     seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
0298 
0299 #define __seqprop(s, prop) _Generic(*(s),               \
0300     seqcount_t:     __seqprop_##prop((void *)(s)),      \
0301     __seqprop_case((s), raw_spinlock,   prop),          \
0302     __seqprop_case((s), spinlock,   prop),          \
0303     __seqprop_case((s), rwlock,     prop),          \
0304     __seqprop_case((s), mutex,      prop))
0305 
0306 #define seqprop_ptr(s)          __seqprop(s, ptr)
0307 #define seqprop_sequence(s)     __seqprop(s, sequence)
0308 #define seqprop_preemptible(s)      __seqprop(s, preemptible)
0309 #define seqprop_assert(s)       __seqprop(s, assert)
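
/*
 * For illustration, the _Generic dispatch above resolves each seqprop_*()
 * call according to the static type of its argument. With the hypothetical
 * instances from the sketches above::
 *
 *  seqprop_sequence(&foo_seq);   // seqcount_t -> __seqprop_sequence()
 *  seqprop_sequence(&bar_seq);   // seqcount_spinlock_t -> __seqprop_spinlock_sequence()
 */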
0310 
0311 /**
0312  * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
0313  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0314  *
0315  * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
0316  * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
0317  * provided before actually loading any of the variables that are to be
0318  * protected in this critical section.
0319  *
0320  * Use carefully, only in critical code, and comment how the barrier is
0321  * provided.
0322  *
0323  * Return: count to be passed to read_seqcount_retry()
0324  */
0325 #define __read_seqcount_begin(s)                    \
0326 ({                                  \
0327     unsigned __seq;                         \
0328                                     \
0329     while ((__seq = seqprop_sequence(s)) & 1)           \
0330         cpu_relax();                        \
0331                                     \
0332     kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);            \
0333     __seq;                              \
0334 })
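
/*
 * Illustrative sketch (hypothetical "foo_seq"): a __read_seqcount_begin()
 * caller must supply the read barrier itself before loading the protected
 * data, e.g. via an smp_rmb() as raw_read_seqcount_begin() below does, or
 * via an equivalent ordering already present on that path::
 *
 *  seq = __read_seqcount_begin(&foo_seq);
 *  smp_rmb();                    // or an equivalent, documented barrier
 *  // ... load the protected data ...
 */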
0335 
0336 /**
0337  * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
0338  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0339  *
0340  * Return: count to be passed to read_seqcount_retry()
0341  */
0342 #define raw_read_seqcount_begin(s)                  \
0343 ({                                  \
0344     unsigned _seq = __read_seqcount_begin(s);           \
0345                                     \
0346     smp_rmb();                          \
0347     _seq;                               \
0348 })
0349 
0350 /**
0351  * read_seqcount_begin() - begin a seqcount_t read critical section
0352  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0353  *
0354  * Return: count to be passed to read_seqcount_retry()
0355  */
0356 #define read_seqcount_begin(s)                      \
0357 ({                                  \
0358     seqcount_lockdep_reader_access(seqprop_ptr(s));         \
0359     raw_read_seqcount_begin(s);                 \
0360 })
0361 
0362 /**
0363  * raw_read_seqcount() - read the raw seqcount_t counter value
0364  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0365  *
0366  * raw_read_seqcount opens a read critical section of the given
0367  * seqcount_t, without any lockdep checking, and without checking or
0368  * masking the sequence counter LSB. Calling code is responsible for
0369  * handling that.
0370  *
0371  * Return: count to be passed to read_seqcount_retry()
0372  */
0373 #define raw_read_seqcount(s)                        \
0374 ({                                  \
0375     unsigned __seq = seqprop_sequence(s);               \
0376                                     \
0377     smp_rmb();                          \
0378     kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);            \
0379     __seq;                              \
0380 })
0381 
0382 /**
0383  * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
0384  *                        lockdep and w/o counter stabilization
0385  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0386  *
0387  * raw_seqcount_begin opens a read critical section of the given
0388  * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
0389  * for the count to stabilize. If a writer is active when it begins, it
0390  * will fail the read_seqcount_retry() at the end of the read critical
0391  * section instead of stabilizing at the beginning of it.
0392  *
0393  * Use this only in special kernel hot paths where the read section is
0394  * small and has a high probability of success through other external
0395  * means. It will save a single branching instruction.
0396  *
0397  * Return: count to be passed to read_seqcount_retry()
0398  */
0399 #define raw_seqcount_begin(s)                       \
0400 ({                                  \
0401     /*                              \
0402      * If the counter is odd, let read_seqcount_retry() fail    \
0403      * by decrementing the counter.                 \
0404      */                             \
0405     raw_read_seqcount(s) & ~1;                  \
0406 })
0407 
0408 /**
0409  * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
0410  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0411  * @start: count, from read_seqcount_begin()
0412  *
0413  * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
0414  * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
0415  * provided before actually loading any of the variables that are to be
0416  * protected in this critical section.
0417  *
0418  * Use carefully, only in critical code, and comment how the barrier is
0419  * provided.
0420  *
0421  * Return: true if a read section retry is required, else false
0422  */
0423 #define __read_seqcount_retry(s, start)                 \
0424     do___read_seqcount_retry(seqprop_ptr(s), start)
0425 
0426 static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
0427 {
0428     kcsan_atomic_next(0);
0429     return unlikely(READ_ONCE(s->sequence) != start);
0430 }
0431 
0432 /**
0433  * read_seqcount_retry() - end a seqcount_t read critical section
0434  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0435  * @start: count, from read_seqcount_begin()
0436  *
0437  * read_seqcount_retry closes the read critical section of given
0438  * seqcount_t.  If the critical section was invalid, it must be ignored
0439  * (and typically retried).
0440  *
0441  * Return: true if a read section retry is required, else false
0442  */
0443 #define read_seqcount_retry(s, start)                   \
0444     do_read_seqcount_retry(seqprop_ptr(s), start)
0445 
0446 static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
0447 {
0448     smp_rmb();
0449     return do___read_seqcount_retry(s, start);
0450 }
0451 
0452 /**
0453  * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
0454  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0455  *
0456  * Context: check write_seqcount_begin()
0457  */
0458 #define raw_write_seqcount_begin(s)                 \
0459 do {                                    \
0460     if (seqprop_preemptible(s))                 \
0461         preempt_disable();                  \
0462                                     \
0463     do_raw_write_seqcount_begin(seqprop_ptr(s));            \
0464 } while (0)
0465 
0466 static inline void do_raw_write_seqcount_begin(seqcount_t *s)
0467 {
0468     kcsan_nestable_atomic_begin();
0469     s->sequence++;
0470     smp_wmb();
0471 }
0472 
0473 /**
0474  * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
0475  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0476  *
0477  * Context: check write_seqcount_end()
0478  */
0479 #define raw_write_seqcount_end(s)                   \
0480 do {                                    \
0481     do_raw_write_seqcount_end(seqprop_ptr(s));          \
0482                                     \
0483     if (seqprop_preemptible(s))                 \
0484         preempt_enable();                   \
0485 } while (0)
0486 
0487 static inline void do_raw_write_seqcount_end(seqcount_t *s)
0488 {
0489     smp_wmb();
0490     s->sequence++;
0491     kcsan_nestable_atomic_end();
0492 }
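
/*
 * Illustrative sketch of the raw write path ("foo" names hypothetical),
 * assuming the caller already provides both serialization and
 * non-preemptibility, for example from within a raw_spinlock_t critical
 * section, and intentionally bypasses lockdep::
 *
 *  raw_spin_lock(&foo_raw_lock);        // serializes writers, non-preemptible
 *  raw_write_seqcount_begin(&foo_seq);
 *  // ... update the protected data ...
 *  raw_write_seqcount_end(&foo_seq);
 *  raw_spin_unlock(&foo_raw_lock);
 */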
0493 
0494 /**
0495  * write_seqcount_begin_nested() - start a seqcount_t write section with
0496  *                                 custom lockdep nesting level
0497  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0498  * @subclass: lockdep nesting level
0499  *
0500  * See Documentation/locking/lockdep-design.rst
0501  * Context: check write_seqcount_begin()
0502  */
0503 #define write_seqcount_begin_nested(s, subclass)            \
0504 do {                                    \
0505     seqprop_assert(s);                      \
0506                                     \
0507     if (seqprop_preemptible(s))                 \
0508         preempt_disable();                  \
0509                                     \
0510     do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);   \
0511 } while (0)
0512 
0513 static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
0514 {
0515     do_raw_write_seqcount_begin(s);
0516     seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
0517 }
0518 
0519 /**
0520  * write_seqcount_begin() - start a seqcount_t write side critical section
0521  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0522  *
0523  * Context: sequence counter write side sections must be serialized and
0524  * non-preemptible. Preemption will be automatically disabled if and
0525  * only if the seqcount write serialization lock is associated, and
0526  * preemptible.  If readers can be invoked from hardirq or softirq
0527  * context, interrupts or bottom halves must be respectively disabled.
0528  */
0529 #define write_seqcount_begin(s)                     \
0530 do {                                    \
0531     seqprop_assert(s);                      \
0532                                     \
0533     if (seqprop_preemptible(s))                 \
0534         preempt_disable();                  \
0535                                     \
0536     do_write_seqcount_begin(seqprop_ptr(s));            \
0537 } while (0)
0538 
0539 static inline void do_write_seqcount_begin(seqcount_t *s)
0540 {
0541     do_write_seqcount_begin_nested(s, 0);
0542 }
0543 
0544 /**
0545  * write_seqcount_end() - end a seqcount_t write side critical section
0546  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0547  *
0548  * Context: Preemption will be automatically re-enabled if and only if
0549  * the seqcount write serialization lock is associated, and preemptible.
0550  */
0551 #define write_seqcount_end(s)                       \
0552 do {                                    \
0553     do_write_seqcount_end(seqprop_ptr(s));              \
0554                                     \
0555     if (seqprop_preemptible(s))                 \
0556         preempt_enable();                   \
0557 } while (0)
0558 
0559 static inline void do_write_seqcount_end(seqcount_t *s)
0560 {
0561     seqcount_release(&s->dep_map, _RET_IP_);
0562     do_raw_write_seqcount_end(s);
0563 }
0564 
0565 /**
0566  * raw_write_seqcount_barrier() - do a seqcount_t write barrier
0567  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0568  *
0569  * This can be used to provide an ordering guarantee instead of the usual
0570  * consistency guarantee. It is one wmb cheaper, because it can collapse
0571  * the two back-to-back wmb()s.
0572  *
0573  * Note that writes surrounding the barrier should be declared atomic (e.g.
0574  * via WRITE_ONCE): a) to ensure the writes become visible to other threads
0575  * atomically, avoiding compiler optimizations; b) to document which writes are
0576  * meant to propagate to the reader critical section. This is necessary because
0577  * neither the writes before nor after the barrier are enclosed in a seq-writer
0578  * critical section that would ensure readers are aware of ongoing writes::
0579  *
0580  *  seqcount_t seq;
0581  *  bool X = true, Y = false;
0582  *
0583  *  void read(void)
0584  *  {
0585  *      bool x, y;
0586  *
0587  *      do {
0588  *          int s = read_seqcount_begin(&seq);
0589  *
0590  *          x = X; y = Y;
0591  *
0592  *      } while (read_seqcount_retry(&seq, s));
0593  *
0594  *      BUG_ON(!x && !y);
0595  *  }
0596  *
0597  *  void write(void)
0598  *  {
0599  *      WRITE_ONCE(Y, true);
0600  *
0601  *      raw_write_seqcount_barrier(&seq);
0602  *
0603  *      WRITE_ONCE(X, false);
0604  *  }
0605  */
0606 #define raw_write_seqcount_barrier(s)                   \
0607     do_raw_write_seqcount_barrier(seqprop_ptr(s))
0608 
0609 static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
0610 {
0611     kcsan_nestable_atomic_begin();
0612     s->sequence++;
0613     smp_wmb();
0614     s->sequence++;
0615     kcsan_nestable_atomic_end();
0616 }
0617 
0618 /**
0619  * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
0620  *                               side operations
0621  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
0622  *
0623  * After write_seqcount_invalidate, no seqcount_t read side operations
0624  * will complete successfully and see data older than this.
0625  */
0626 #define write_seqcount_invalidate(s)                    \
0627     do_write_seqcount_invalidate(seqprop_ptr(s))
0628 
0629 static inline void do_write_seqcount_invalidate(seqcount_t *s)
0630 {
0631     smp_wmb();
0632     kcsan_nestable_atomic_begin();
0633     s->sequence += 2;
0634     kcsan_nestable_atomic_end();
0635 }
0636 
0637 /*
0638  * Latch sequence counters (seqcount_latch_t)
0639  *
0640  * A sequence counter variant where the counter even/odd value is used to
0641  * switch between two copies of protected data. This allows the read path,
0642  * typically NMIs, to safely interrupt the write side critical section.
0643  *
0644  * As the write sections are fully preemptible, no special handling for
0645  * PREEMPT_RT is needed.
0646  */
0647 typedef struct {
0648     seqcount_t seqcount;
0649 } seqcount_latch_t;
0650 
0651 /**
0652  * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
0653  * @seq_name: Name of the seqcount_latch_t instance
0654  */
0655 #define SEQCNT_LATCH_ZERO(seq_name) {                   \
0656     .seqcount       = SEQCNT_ZERO(seq_name.seqcount),   \
0657 }
0658 
0659 /**
0660  * seqcount_latch_init() - runtime initializer for seqcount_latch_t
0661  * @s: Pointer to the seqcount_latch_t instance
0662  */
0663 #define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
0664 
0665 /**
0666  * raw_read_seqcount_latch() - pick even/odd latch data copy
0667  * @s: Pointer to seqcount_latch_t
0668  *
0669  * See raw_write_seqcount_latch() for details and a full reader/writer
0670  * usage example.
0671  *
0672  * Return: sequence counter raw value. Use the lowest bit as an index for
0673  * picking which data copy to read. The full counter must then be checked
0674  * with read_seqcount_latch_retry().
0675  */
0676 static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
0677 {
0678     /*
0679      * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
0680      * Due to the dependent load, a full smp_rmb() is not needed.
0681      */
0682     return READ_ONCE(s->seqcount.sequence);
0683 }
0684 
0685 /**
0686  * read_seqcount_latch_retry() - end a seqcount_latch_t read section
0687  * @s:      Pointer to seqcount_latch_t
0688  * @start:  count, from raw_read_seqcount_latch()
0689  *
0690  * Return: true if a read section retry is required, else false
0691  */
0692 static inline int
0693 read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
0694 {
0695     return read_seqcount_retry(&s->seqcount, start);
0696 }
0697 
0698 /**
0699  * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
0700  * @s: Pointer to seqcount_latch_t
0701  *
0702  * The latch technique is a multiversion concurrency control method that allows
0703  * queries during non-atomic modifications. If you can guarantee queries never
0704  * interrupt the modification -- e.g. the concurrency is strictly between CPUs
0705  * -- you most likely do not need this.
0706  *
0707  * Where the traditional RCU/lockless data structures rely on atomic
0708  * modifications to ensure queries observe either the old or the new state the
0709  * latch allows the same for non-atomic updates. The trade-off is doubling the
0710  * cost of storage; we have to maintain two copies of the entire data
0711  * structure.
0712  *
0713  * Very simply put: we first modify one copy and then the other. This ensures
0714  * there is always one copy in a stable state, ready to give us an answer.
0715  *
0716  * The basic form is a data structure like::
0717  *
0718  *  struct latch_struct {
0719  *      seqcount_latch_t    seq;
0720  *      struct data_struct  data[2];
0721  *  };
0722  *
0723  * Where a modification, which is assumed to be externally serialized, does the
0724  * following::
0725  *
0726  *  void latch_modify(struct latch_struct *latch, ...)
0727  *  {
0728  *      smp_wmb();  // Ensure that the last data[1] update is visible
0729  *      latch->seq.sequence++;
0730  *      smp_wmb();  // Ensure that the seqcount update is visible
0731  *
0732  *      modify(latch->data[0], ...);
0733  *
0734  *      smp_wmb();  // Ensure that the data[0] update is visible
0735  *      latch->seq.sequence++;
0736  *      smp_wmb();  // Ensure that the seqcount update is visible
0737  *
0738  *      modify(latch->data[1], ...);
0739  *  }
0740  *
0741  * The query will have a form like::
0742  *
0743  *  struct entry *latch_query(struct latch_struct *latch, ...)
0744  *  {
0745  *      struct entry *entry;
0746  *      unsigned seq, idx;
0747  *
0748  *      do {
0749  *          seq = raw_read_seqcount_latch(&latch->seq);
0750  *
0751  *          idx = seq & 0x01;
0752  *          entry = data_query(latch->data[idx], ...);
0753  *
0754  *      // This includes needed smp_rmb()
0755  *      } while (read_seqcount_latch_retry(&latch->seq, seq));
0756  *
0757  *      return entry;
0758  *  }
0759  *
0760  * So during the modification, queries are first redirected to data[1]. Then we
0761  * modify data[0]. When that is complete, we redirect queries back to data[0]
0762  * and we can modify data[1].
0763  *
0764  * NOTE:
0765  *
0766  *  The non-requirement for atomic modifications does _NOT_ include
0767  *  the publishing of new entries in the case where data is a dynamic
0768  *  data structure.
0769  *
0770  *  An iteration might start in data[0] and get suspended long enough
0771  *  to miss an entire modification sequence; once it resumes it might
0772  *  observe the new entry.
0773  *
0774  * NOTE2:
0775  *
0776  *  When data is a dynamic data structure, one should use regular RCU
0777  *  patterns to manage the lifetimes of the objects within.
0778  */
0779 static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
0780 {
0781     smp_wmb();  /* prior stores before incrementing "sequence" */
0782     s->seqcount.sequence++;
0783     smp_wmb();      /* increment "sequence" before following stores */
0784 }
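
/*
 * For illustration, the latch_modify() example above can equivalently be
 * written in terms of this helper (same hypothetical structure)::
 *
 *  void latch_modify(struct latch_struct *latch, ...)
 *  {
 *      raw_write_seqcount_latch(&latch->seq);
 *      modify(latch->data[0], ...);
 *
 *      raw_write_seqcount_latch(&latch->seq);
 *      modify(latch->data[1], ...);
 *  }
 */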
0785 
0786 /*
0787  * Sequential locks (seqlock_t)
0788  *
0789  * Sequence counters with an embedded spinlock for writer serialization
0790  * and non-preemptibility.
0791  *
0792  * For more info, see:
0793  *    - Comments on top of seqcount_t
0794  *    - Documentation/locking/seqlock.rst
0795  */
0796 typedef struct {
0797     /*
0798      * Make sure that readers don't starve writers on PREEMPT_RT: use
0799      * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
0800      */
0801     seqcount_spinlock_t seqcount;
0802     spinlock_t lock;
0803 } seqlock_t;
0804 
0805 #define __SEQLOCK_UNLOCKED(lockname)                    \
0806     {                               \
0807         .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
0808         .lock = __SPIN_LOCK_UNLOCKED(lockname)          \
0809     }
0810 
0811 /**
0812  * seqlock_init() - dynamic initializer for seqlock_t
0813  * @sl: Pointer to the seqlock_t instance
0814  */
0815 #define seqlock_init(sl)                        \
0816     do {                                \
0817         spin_lock_init(&(sl)->lock);                \
0818         seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);   \
0819     } while (0)
0820 
0821 /**
0822  * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
0823  * @sl: Name of the seqlock_t instance
0824  */
0825 #define DEFINE_SEQLOCK(sl) \
0826         seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
0827 
0828 /**
0829  * read_seqbegin() - start a seqlock_t read side critical section
0830  * @sl: Pointer to seqlock_t
0831  *
0832  * Return: count, to be passed to read_seqretry()
0833  */
0834 static inline unsigned read_seqbegin(const seqlock_t *sl)
0835 {
0836     unsigned ret = read_seqcount_begin(&sl->seqcount);
0837 
0838     kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
0839     kcsan_flat_atomic_begin();
0840     return ret;
0841 }
0842 
0843 /**
0844  * read_seqretry() - end a seqlock_t read side section
0845  * @sl: Pointer to seqlock_t
0846  * @start: count, from read_seqbegin()
0847  *
0848  * read_seqretry closes the read side critical section of given seqlock_t.
0849  * If the critical section was invalid, it must be ignored (and typically
0850  * retried).
0851  *
0852  * Return: true if a read section retry is required, else false
0853  */
0854 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
0855 {
0856     /*
0857      * Assume not nested: read_seqretry() may be called multiple times when
0858      * completing the read critical section.
0859      */
0860     kcsan_flat_atomic_end();
0861 
0862     return read_seqcount_retry(&sl->seqcount, start);
0863 }
0864 
0865 /*
0866  * For all seqlock_t write side functions, use the internal
0867  * do_write_seqcount_begin() instead of generic write_seqcount_begin().
0868  * This way, no redundant lockdep_assert_held() checks are added.
0869  */
0870 
0871 /**
0872  * write_seqlock() - start a seqlock_t write side critical section
0873  * @sl: Pointer to seqlock_t
0874  *
0875  * write_seqlock opens a write side critical section for the given
0876  * seqlock_t.  It also implicitly acquires the spinlock_t embedded inside
0877  * that sequential lock. All seqlock_t write side sections are thus
0878  * automatically serialized and non-preemptible.
0879  *
0880  * Context: if the seqlock_t read section, or other write side critical
0881  * sections, can be invoked from hardirq or softirq contexts, use the
0882  * _irqsave or _bh variants of this function instead.
0883  */
0884 static inline void write_seqlock(seqlock_t *sl)
0885 {
0886     spin_lock(&sl->lock);
0887     do_write_seqcount_begin(&sl->seqcount.seqcount);
0888 }
0889 
0890 /**
0891  * write_sequnlock() - end a seqlock_t write side critical section
0892  * @sl: Pointer to seqlock_t
0893  *
0894  * write_sequnlock closes the (serialized and non-preemptible) write side
0895  * critical section of given seqlock_t.
0896  */
0897 static inline void write_sequnlock(seqlock_t *sl)
0898 {
0899     do_write_seqcount_end(&sl->seqcount.seqcount);
0900     spin_unlock(&sl->lock);
0901 }
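
/*
 * Illustrative seqlock_t usage sketch ("foo" names hypothetical): writers
 * take the embedded lock, lockless readers retry on concurrent updates::
 *
 *  static DEFINE_SEQLOCK(foo_seqlock);
 *  static u64 foo_value;
 *
 *  void foo_set(u64 v)
 *  {
 *      write_seqlock(&foo_seqlock);
 *      foo_value = v;
 *      write_sequnlock(&foo_seqlock);
 *  }
 *
 *  u64 foo_get(void)
 *  {
 *      unsigned seq;
 *      u64 v;
 *
 *      do {
 *          seq = read_seqbegin(&foo_seqlock);
 *          v = foo_value;
 *      } while (read_seqretry(&foo_seqlock, seq));
 *
 *      return v;
 *  }
 */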
0902 
0903 /**
0904  * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
0905  * @sl: Pointer to seqlock_t
0906  *
0907  * _bh variant of write_seqlock(). Use only if the read side section, or
0908  * other write side sections, can be invoked from softirq contexts.
0909  */
0910 static inline void write_seqlock_bh(seqlock_t *sl)
0911 {
0912     spin_lock_bh(&sl->lock);
0913     do_write_seqcount_begin(&sl->seqcount.seqcount);
0914 }
0915 
0916 /**
0917  * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
0918  * @sl: Pointer to seqlock_t
0919  *
0920  * write_sequnlock_bh closes the serialized, non-preemptible, and
0921  * softirqs-disabled seqlock_t write side critical section opened with
0922  * write_seqlock_bh().
0923  */
0924 static inline void write_sequnlock_bh(seqlock_t *sl)
0925 {
0926     do_write_seqcount_end(&sl->seqcount.seqcount);
0927     spin_unlock_bh(&sl->lock);
0928 }
0929 
0930 /**
0931  * write_seqlock_irq() - start a non-interruptible seqlock_t write section
0932  * @sl: Pointer to seqlock_t
0933  *
0934  * _irq variant of write_seqlock(). Use only if the read side section, or
0935  * other write sections, can be invoked from hardirq contexts.
0936  */
0937 static inline void write_seqlock_irq(seqlock_t *sl)
0938 {
0939     spin_lock_irq(&sl->lock);
0940     do_write_seqcount_begin(&sl->seqcount.seqcount);
0941 }
0942 
0943 /**
0944  * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
0945  * @sl: Pointer to seqlock_t
0946  *
0947  * write_sequnlock_irq closes the serialized and non-interruptible
0948  * seqlock_t write side section opened with write_seqlock_irq().
0949  */
0950 static inline void write_sequnlock_irq(seqlock_t *sl)
0951 {
0952     do_write_seqcount_end(&sl->seqcount.seqcount);
0953     spin_unlock_irq(&sl->lock);
0954 }
0955 
0956 static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
0957 {
0958     unsigned long flags;
0959 
0960     spin_lock_irqsave(&sl->lock, flags);
0961     do_write_seqcount_begin(&sl->seqcount.seqcount);
0962     return flags;
0963 }
0964 
0965 /**
0966  * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
0967  *                           section
0968  * @lock:  Pointer to seqlock_t
0969  * @flags: Stack-allocated storage for saving caller's local interrupt
0970  *         state, to be passed to write_sequnlock_irqrestore().
0971  *
0972  * _irqsave variant of write_seqlock(). Use it only if the read side
0973  * section, or other write sections, can be invoked from hardirq context.
0974  */
0975 #define write_seqlock_irqsave(lock, flags)              \
0976     do { flags = __write_seqlock_irqsave(lock); } while (0)
0977 
0978 /**
0979  * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
0980  *                                section
0981  * @sl:    Pointer to seqlock_t
0982  * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
0983  *
0984  * write_sequnlock_irqrestore closes the serialized and non-interruptible
0985  * seqlock_t write section previously opened with write_seqlock_irqsave().
0986  */
0987 static inline void
0988 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
0989 {
0990     do_write_seqcount_end(&sl->seqcount.seqcount);
0991     spin_unlock_irqrestore(&sl->lock, flags);
0992 }
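
/*
 * Illustrative sketch of the irqsave write path ("foo_seqlock" is
 * hypothetical), for when readers or other writers may run in hardirq
 * context::
 *
 *  unsigned long flags;
 *
 *  write_seqlock_irqsave(&foo_seqlock, flags);
 *  // ... update the protected data ...
 *  write_sequnlock_irqrestore(&foo_seqlock, flags);
 */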
0993 
0994 /**
0995  * read_seqlock_excl() - begin a seqlock_t locking reader section
0996  * @sl: Pointer to seqlock_t
0997  *
0998  * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
0999  * locking reader exclusively locks out *both* other writers *and* other
1000  * locking readers, but it does not update the embedded sequence number.
1001  *
1002  * Locking readers act like a normal spin_lock()/spin_unlock().
1003  *
1004  * Context: if the seqlock_t write section, *or other read sections*, can
1005  * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
1006  * variant of this function instead.
1007  *
1008  * The opened read section must be closed with read_sequnlock_excl().
1009  */
1010 static inline void read_seqlock_excl(seqlock_t *sl)
1011 {
1012     spin_lock(&sl->lock);
1013 }
1014 
1015 /**
1016  * read_sequnlock_excl() - end a seqlock_t locking reader critical section
1017  * @sl: Pointer to seqlock_t
1018  */
1019 static inline void read_sequnlock_excl(seqlock_t *sl)
1020 {
1021     spin_unlock(&sl->lock);
1022 }
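
/*
 * Illustrative locking reader sketch ("foo_seqlock" is hypothetical): the
 * reader takes the embedded lock instead of spinning on the sequence
 * counter, so it cannot be starved by a burst of writers::
 *
 *  read_seqlock_excl(&foo_seqlock);
 *  // ... read the protected data; writers are locked out ...
 *  read_sequnlock_excl(&foo_seqlock);
 */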
1023 
1024 /**
1025  * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
1026  *              softirqs disabled
1027  * @sl: Pointer to seqlock_t
1028  *
1029  * _bh variant of read_seqlock_excl(). Use this variant only if the
1030  * seqlock_t write side section, *or other read sections*, can be invoked
1031  * from softirq contexts.
1032  */
1033 static inline void read_seqlock_excl_bh(seqlock_t *sl)
1034 {
1035     spin_lock_bh(&sl->lock);
1036 }
1037 
1038 /**
1039  * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
1040  *                reader section
1041  * @sl: Pointer to seqlock_t
1042  */
1043 static inline void read_sequnlock_excl_bh(seqlock_t *sl)
1044 {
1045     spin_unlock_bh(&sl->lock);
1046 }
1047 
1048 /**
1049  * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
1050  *               reader section
1051  * @sl: Pointer to seqlock_t
1052  *
1053  * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
1054  * write side section, *or other read sections*, can be invoked from a
1055  * hardirq context.
1056  */
1057 static inline void read_seqlock_excl_irq(seqlock_t *sl)
1058 {
1059     spin_lock_irq(&sl->lock);
1060 }
1061 
1062 /**
1063  * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
1064  *                             locking reader section
1065  * @sl: Pointer to seqlock_t
1066  */
1067 static inline void read_sequnlock_excl_irq(seqlock_t *sl)
1068 {
1069     spin_unlock_irq(&sl->lock);
1070 }
1071 
1072 static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
1073 {
1074     unsigned long flags;
1075 
1076     spin_lock_irqsave(&sl->lock, flags);
1077     return flags;
1078 }
1079 
1080 /**
1081  * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
1082  *               locking reader section
1083  * @lock:  Pointer to seqlock_t
1084  * @flags: Stack-allocated storage for saving caller's local interrupt
1085  *         state, to be passed to read_sequnlock_excl_irqrestore().
1086  *
1087  * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
1088  * write side section, *or other read sections*, can be invoked from a
1089  * hardirq context.
1090  */
1091 #define read_seqlock_excl_irqsave(lock, flags)              \
1092     do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
1093 
1094 /**
1095  * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
1096  *                    locking reader section
1097  * @sl:    Pointer to seqlock_t
1098  * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
1099  */
1100 static inline void
1101 read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
1102 {
1103     spin_unlock_irqrestore(&sl->lock, flags);
1104 }
1105 
1106 /**
1107  * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
1108  * @lock: Pointer to seqlock_t
1109  * @seq : Marker and return parameter. If the passed value is even, the
1110  * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
1111  * If the passed value is odd, the reader will become a *locking* reader
1112  * as in read_seqlock_excl().  In the first call to this function, the
1113  * caller *must* initialize and pass an even value to @seq; this way, a
1114  * lockless read can be optimistically tried first.
1115  *
1116  * read_seqbegin_or_lock is an API designed to optimistically try a normal
1117  * lockless seqlock_t read section first.  If an odd counter is found, the
1118  * lockless read trial has failed, and the next read iteration transforms
1119  * itself into a full seqlock_t locking reader.
1120  *
1121  * This is typically used to avoid lockless seqlock_t reader starvation
1122  * (too many retry loops) in the case of a sharp spike in write side
1123  * activity.
1124  *
1125  * Context: if the seqlock_t write section, *or other read sections*, can
1126  * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
1127  * variant of this function instead.
1128  *
1129  * Check Documentation/locking/seqlock.rst for template example code.
1130  *
1131  * Return: the encountered sequence counter value, through the @seq
1132  * parameter, which is overloaded as a return parameter. This returned
1133  * value must be checked with need_seqretry(). If the read section needs to
1134  * be retried, this returned value must also be passed as the @seq
1135  * parameter of the next read_seqbegin_or_lock() iteration.
1136  */
1137 static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
1138 {
1139     if (!(*seq & 1))    /* Even */
1140         *seq = read_seqbegin(lock);
1141     else            /* Odd */
1142         read_seqlock_excl(lock);
1143 }
1144 
1145 /**
1146  * need_seqretry() - validate seqlock_t "locking or lockless" read section
1147  * @lock: Pointer to seqlock_t
1148  * @seq: sequence count, from read_seqbegin_or_lock()
1149  *
1150  * Return: true if a read section retry is required, false otherwise
1151  */
1152 static inline int need_seqretry(seqlock_t *lock, int seq)
1153 {
1154     return !(seq & 1) && read_seqretry(lock, seq);
1155 }
1156 
1157 /**
1158  * done_seqretry() - end seqlock_t "locking or lockless" reader section
1159  * @lock: Pointer to seqlock_t
1160  * @seq: count, from read_seqbegin_or_lock()
1161  *
1162  * done_seqretry finishes the seqlock_t read side critical section started
1163  * with read_seqbegin_or_lock() and validated by need_seqretry().
1164  */
1165 static inline void done_seqretry(seqlock_t *lock, int seq)
1166 {
1167     if (seq & 1)
1168         read_sequnlock_excl(lock);
1169 }
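
/*
 * Illustrative sketch of the combined lockless/locking reader pattern
 * built from the three helpers above ("foo" names hypothetical). The
 * first pass is lockless; if it fails, the retry explicitly passes an odd
 * marker and therefore runs as a locking reader::
 *
 *  int seq = 0;                          // even: start lockless
 *
 *  retry:
 *  read_seqbegin_or_lock(&foo_seqlock, &seq);
 *  // ... read the protected data ...
 *  if (need_seqretry(&foo_seqlock, seq)) {
 *      seq = 1;                          // odd: retry as a locking reader
 *      goto retry;
 *  }
 *  done_seqretry(&foo_seqlock, seq);
 */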
1170 
1171 /**
1172  * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
1173  *                                   a non-interruptible locking reader
1174  * @lock: Pointer to seqlock_t
1175  * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
1176  *
1177  * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
1178  * the seqlock_t write section, *or other read sections*, can be invoked
1179  * from hardirq context.
1180  *
1181  * Note: Interrupts will be disabled only for "locking reader" mode.
1182  *
1183  * Return:
1184  *
1185  *   1. The saved local interrupts state in case of a locking reader, to
1186  *      be passed to done_seqretry_irqrestore().
1187  *
1188  *   2. The encountered sequence counter value, returned through @seq
1189  *      overloaded as a return parameter. Check read_seqbegin_or_lock().
1190  */
1191 static inline unsigned long
1192 read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
1193 {
1194     unsigned long flags = 0;
1195 
1196     if (!(*seq & 1))    /* Even */
1197         *seq = read_seqbegin(lock);
1198     else            /* Odd */
1199         read_seqlock_excl_irqsave(lock, flags);
1200 
1201     return flags;
1202 }
1203 
1204 /**
1205  * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
1206  *              non-interruptible locking reader section
1207  * @lock:  Pointer to seqlock_t
1208  * @seq:   Count, from read_seqbegin_or_lock_irqsave()
1209  * @flags: Caller's saved local interrupt state in case of a locking
1210  *     reader, also from read_seqbegin_or_lock_irqsave()
1211  *
1212  * This is the _irqrestore variant of done_seqretry(). The read section
1213  * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
1214  * by need_seqretry().
1215  */
1216 static inline void
1217 done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
1218 {
1219     if (seq & 1)
1220         read_sequnlock_excl_irqrestore(lock, flags);
1221 }
1222 #endif /* __LINUX_SEQLOCK_H */