// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU segmented callback lists, function definitions
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "rcu_segcblist.h"

/* Initialize simple callback list. */
void rcu_cblist_init(struct rcu_cblist *rclp)
{
    rclp->head = NULL;
    rclp->tail = &rclp->head;
    rclp->len = 0;
}

/*
 * Enqueue an rcu_head structure onto the specified callback list.
 */
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp)
{
    *rclp->tail = rhp;
    rclp->tail = &rhp->next;
    WRITE_ONCE(rclp->len, rclp->len + 1);
}

/*
 * Flush the second rcu_cblist structure onto the first one, obliterating
 * any contents of the first.  If rhp is non-NULL, enqueue it as the sole
 * element of the second rcu_cblist structure, but ensuring that the second
 * rcu_cblist structure, if initially non-empty, always appears non-empty
 * throughout the process.  If rhp is NULL, the second rcu_cblist structure
 * is instead initialized to empty.
 */
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
                  struct rcu_cblist *srclp,
                  struct rcu_head *rhp)
{
    drclp->head = srclp->head;
    if (drclp->head)
        drclp->tail = srclp->tail;
    else
        drclp->tail = &drclp->head;
    drclp->len = srclp->len;
    if (!rhp) {
        rcu_cblist_init(srclp);
    } else {
        rhp->next = NULL;
        srclp->head = rhp;
        srclp->tail = &rhp->next;
        WRITE_ONCE(srclp->len, 1);
    }
}
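
/*
 * A minimal usage sketch (not part of this file): drain all of a source
 * list into a local list while leaving one placeholder callback behind,
 * so that lockless emptiness checks never see the source list go empty.
 * The names local_cbs, src_cbs, and placeholder_rh are hypothetical:
 *
 *    struct rcu_cblist local_cbs;
 *
 *    rcu_cblist_init(&local_cbs);
 *    rcu_cblist_flush_enqueue(&local_cbs, &src_cbs, &placeholder_rh);
 *    // local_cbs now owns the old callbacks; src_cbs holds only
 *    // placeholder_rh, so it appeared non-empty throughout.
 */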

/*
 * Dequeue the oldest rcu_head structure from the specified callback
 * list.
 */
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
{
    struct rcu_head *rhp;

    rhp = rclp->head;
    if (!rhp)
        return NULL;
    rclp->len--;
    rclp->head = rhp->next;
    if (!rclp->head)
        rclp->tail = &rclp->head;
    return rhp;
}
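
/*
 * A minimal drain-loop sketch (illustrative only, assuming the caller
 * has exclusive access to the list and each callback's ->func is set):
 *
 *    struct rcu_head *rhp;
 *
 *    while ((rhp = rcu_cblist_dequeue(&my_cblist)) != NULL)
 *        rhp->func(rhp);    // Invoke callbacks oldest-first.
 */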

/* Set the length of an rcu_segcblist structure. */
static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
    atomic_long_set(&rsclp->len, v);
#else
    WRITE_ONCE(rsclp->len, v);
#endif
}

/* Get the length of a segment of the rcu_segcblist structure. */
static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg)
{
    return READ_ONCE(rsclp->seglen[seg]);
}

/* Return number of callbacks in segmented callback list by summing seglen. */
long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp)
{
    long len = 0;
    int i;

    for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
        len += rcu_segcblist_get_seglen(rsclp, i);

    return len;
}

/* Set the length of a segment of the rcu_segcblist structure. */
static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v)
{
    WRITE_ONCE(rsclp->seglen[seg], v);
}

/* Increase the numeric length of a segment by a specified amount. */
static void rcu_segcblist_add_seglen(struct rcu_segcblist *rsclp, int seg, long v)
{
    WRITE_ONCE(rsclp->seglen[seg], rsclp->seglen[seg] + v);
}

/* Move the "from" segment's length count over to the "to" segment. */
static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to)
{
    long len;

    if (from == to)
        return;

    len = rcu_segcblist_get_seglen(rsclp, from);
    if (!len)
        return;

    rcu_segcblist_add_seglen(rsclp, to, len);
    rcu_segcblist_set_seglen(rsclp, from, 0);
}

/* Increment segment's length. */
static void rcu_segcblist_inc_seglen(struct rcu_segcblist *rsclp, int seg)
{
    rcu_segcblist_add_seglen(rsclp, seg, 1);
}

/*
 * Increase the numeric length of an rcu_segcblist structure by the
 * specified amount, which can be negative.  This can cause the ->len
 * field to disagree with the actual number of callbacks on the structure.
 * This increase is fully ordered with respect to the caller's accesses
 * both before and after.
 *
 * So why on earth is a memory barrier required both before and after
 * the update to the ->len field???
 *
 * The reason is that rcu_barrier() locklessly samples each CPU's ->len
 * field, and if a given CPU's field is zero, avoids IPIing that CPU.
 * This can of course race with both queuing and invoking of callbacks.
 * Failing to correctly handle either of these races could result in
 * rcu_barrier() failing to IPI a CPU that actually had callbacks queued
 * which rcu_barrier() was obligated to wait on.  And if rcu_barrier()
 * failed to wait on such a callback, unloading certain kernel modules
 * would result in calls to functions whose code was no longer present in
 * the kernel, for but one example.
 *
 * Therefore, ->len transitions from 1->0 and 0->1 have to be carefully
 * ordered with respect to both list modifications and the rcu_barrier().
 *
 * The queuing case is CASE 1 and the invoking case is CASE 2.
 *
 * CASE 1: Suppose that CPU 0 has no callbacks queued, but invokes
 * call_rcu() just as CPU 1 invokes rcu_barrier().  CPU 0's ->len field
 * will transition from 0->1, which is one of the transitions that must
 * be handled carefully.  Without the full memory barriers after the ->len
 * update and at the beginning of rcu_barrier(), the following could happen:
 *
 * CPU 0                CPU 1
 *
 * call_rcu().
 *                  rcu_barrier() sees ->len as 0.
 * set ->len = 1.
 *                  rcu_barrier() does nothing.
 *                  module is unloaded.
 * callback invokes unloaded function!
 *
 * With the full barriers, any case where rcu_barrier() sees ->len as 0 will
 * have unambiguously preceded the return from the racing call_rcu(), which
 * means that this call_rcu() invocation is OK to not wait on.  After all,
 * you are supposed to make sure that any problematic call_rcu() invocations
 * happen before the rcu_barrier().
 *
 *
 * CASE 2: Suppose that CPU 0 is invoking its last callback just as
 * CPU 1 invokes rcu_barrier().  CPU 0's ->len field will transition from
 * 1->0, which is one of the transitions that must be handled carefully.
 * Without the full memory barriers before the ->len update and at the
 * end of rcu_barrier(), the following could happen:
 *
 * CPU 0                CPU 1
 *
 * start invoking last callback
 * set ->len = 0 (reordered)
 *                  rcu_barrier() sees ->len as 0
 *                  rcu_barrier() does nothing.
 *                  module is unloaded
 * callback executing after unloaded!
 *
 * With the full barriers, any case where rcu_barrier() sees ->len as 0
 * will be fully ordered after the completion of the callback function,
 * so that the module unloading operation is completely safe.
 *
 */
void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
    smp_mb__before_atomic(); // Read header comment above.
    atomic_long_add(v, &rsclp->len);
    smp_mb__after_atomic();  // Read header comment above.
#else
    smp_mb(); // Read header comment above.
    WRITE_ONCE(rsclp->len, rsclp->len + v);
    smp_mb(); // Read header comment above.
#endif
}
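
/*
 * For illustration, the rcu_barrier()-side sampling that these barriers
 * pair with looks roughly like the following sketch (hypothetical helper
 * names, not the actual rcu_barrier() code):
 *
 *    smp_mb();    // Order prior work before sampling each ->len.
 *    for_each_possible_cpu(cpu) {
 *        struct rcu_segcblist *rsclp = per_cpu_segcblist(cpu);    // Hypothetical.
 *
 *        if (rcu_segcblist_n_cbs(rsclp))
 *            entrain_barrier_callback_on(cpu);    // Hypothetical.
 *    }
 *    smp_mb();    // Order the samples before declaring completion.
 */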

/*
 * Increase the numeric length of an rcu_segcblist structure by one.
 * This can cause the ->len field to disagree with the actual number of
 * callbacks on the structure.  This increase is fully ordered with respect
 * to the caller's accesses both before and after.
 */
void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp)
{
    rcu_segcblist_add_len(rsclp, 1);
}

/*
 * Initialize an rcu_segcblist structure.
 */
void rcu_segcblist_init(struct rcu_segcblist *rsclp)
{
    int i;

    BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
    BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
    rsclp->head = NULL;
    for (i = 0; i < RCU_CBLIST_NSEGS; i++) {
        rsclp->tails[i] = &rsclp->head;
        rcu_segcblist_set_seglen(rsclp, i, 0);
    }
    rcu_segcblist_set_len(rsclp, 0);
    rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED);
}
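
/*
 * The resulting empty-list invariant, as a sketch (illustrative only):
 *
 *    struct rcu_segcblist sclp;
 *
 *    rcu_segcblist_init(&sclp);
 *    // Now sclp.head == NULL and sclp.tails[i] == &sclp.head for every
 *    // segment i, so all four segments are empty:
 *    WARN_ON(!rcu_segcblist_empty(&sclp));
 *    WARN_ON(rcu_segcblist_n_cbs(&sclp));
 */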

/*
 * Disable the specified rcu_segcblist structure, so that callbacks can
 * no longer be posted to it.  This structure must be empty.
 */
void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
{
    WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
    WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
    rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED);
}

/*
 * Mark the specified rcu_segcblist structure as offloaded (or not).
 */
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
{
    if (offload)
        rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED);
    else
        rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are ready to be invoked?
 */
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp)
{
    return rcu_segcblist_is_enabled(rsclp) &&
           &rsclp->head != READ_ONCE(rsclp->tails[RCU_DONE_TAIL]);
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are still pending, that is, not yet ready to be invoked?
 */
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
{
    return rcu_segcblist_is_enabled(rsclp) &&
           !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
}

/*
 * Return a pointer to the first callback in the specified rcu_segcblist
 * structure.  This is useful for diagnostics.
 */
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp)
{
    if (rcu_segcblist_is_enabled(rsclp))
        return rsclp->head;
    return NULL;
}

/*
 * Return a pointer to the first pending callback in the specified
 * rcu_segcblist structure.  This is useful just after posting a given
 * callback -- if that callback is the first pending callback, then
 * you cannot rely on someone else having already started up the required
 * grace period.
 */
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp)
{
    if (rcu_segcblist_is_enabled(rsclp))
        return *rsclp->tails[RCU_DONE_TAIL];
    return NULL;
}

/*
 * Return false if there are no CBs awaiting grace periods, otherwise,
 * return true and store the nearest waited-upon grace period into *lp.
 */
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp)
{
    if (!rcu_segcblist_pend_cbs(rsclp))
        return false;
    *lp = rsclp->gp_seq[RCU_WAIT_TAIL];
    return true;
}
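
/*
 * A minimal usage sketch (hypothetical caller, not this file's code):
 *
 *    unsigned long gp_seq_needed;
 *
 *    if (rcu_segcblist_nextgp(rsclp, &gp_seq_needed))
 *        request_grace_period(gp_seq_needed);    // Hypothetical helper.
 */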

/*
 * Enqueue the specified callback onto the specified rcu_segcblist
 * structure, updating accounting as needed.  Note that the ->len
 * field may be accessed locklessly, hence the WRITE_ONCE().
 * The ->len field is used by rcu_barrier() and friends to determine
 * if it must post a callback on this structure, and it is OK
 * for rcu_barrier() to sometimes post callbacks needlessly, but
 * absolutely not OK for it to ever miss posting a callback.
 */
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
               struct rcu_head *rhp)
{
    rcu_segcblist_inc_len(rsclp);
    rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL);
    rhp->next = NULL;
    WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
    WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);
}
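
/*
 * A posting sketch in the style of call_rcu() (illustrative only;
 * my_rcu_head, my_func, and start_grace_period() are hypothetical, and
 * the caller is assumed to exclude concurrent enqueuers, e.g. by
 * disabling interrupts):
 *
 *    my_rcu_head.func = my_func;
 *    rcu_segcblist_enqueue(rsclp, &my_rcu_head);
 *    if (rcu_segcblist_first_pend_cb(rsclp) == &my_rcu_head)
 *        start_grace_period();    // No one else has started it.
 */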

/*
 * Entrain the specified callback onto the specified rcu_segcblist at
 * the end of the last non-empty segment.  If the entire rcu_segcblist
 * is empty, make no change, but return false.
 *
 * This is intended for use by rcu_barrier()-like primitives, -not-
 * for normal grace-period use.  IMPORTANT:  The callback you enqueue
 * will wait for all prior callbacks, NOT necessarily for a grace
 * period.  You have been warned.
 */
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
               struct rcu_head *rhp)
{
    int i;

    if (rcu_segcblist_n_cbs(rsclp) == 0)
        return false;
    rcu_segcblist_inc_len(rsclp);
    smp_mb(); /* Ensure counts are updated before callback is entrained. */
    rhp->next = NULL;
    for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
        if (rsclp->tails[i] != rsclp->tails[i - 1])
            break;
    rcu_segcblist_inc_seglen(rsclp, i);
    WRITE_ONCE(*rsclp->tails[i], rhp);
    for (; i <= RCU_NEXT_TAIL; i++)
        WRITE_ONCE(rsclp->tails[i], &rhp->next);
    return true;
}
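
/*
 * An rcu_barrier()-style sketch (illustrative only; barrier_head,
 * barrier_callback, and barrier_done are hypothetical):
 *
 *    barrier_head.func = barrier_callback;
 *    if (!rcu_segcblist_entrain(rsclp, &barrier_head))
 *        complete(&barrier_done);    // Empty list: nothing to wait for.
 *    // Otherwise barrier_callback runs only after all prior callbacks,
 *    // and can signal barrier_done itself.
 */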

/*
 * Extract only those callbacks ready to be invoked from the specified
 * rcu_segcblist structure and place them in the specified rcu_cblist
 * structure.
 */
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
                    struct rcu_cblist *rclp)
{
    int i;

    if (!rcu_segcblist_ready_cbs(rsclp))
        return; /* Nothing to do. */
    rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL);
    *rclp->tail = rsclp->head;
    WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
    WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
    rclp->tail = rsclp->tails[RCU_DONE_TAIL];
    for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
        if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
            WRITE_ONCE(rsclp->tails[i], &rsclp->head);
    rcu_segcblist_set_seglen(rsclp, RCU_DONE_TAIL, 0);
}
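
/*
 * A callback-invocation sketch (illustrative only; the caller is assumed
 * to hold whatever lock protects rsclp during the extraction):
 *
 *    struct rcu_cblist ready;
 *    struct rcu_head *rhp;
 *
 *    rcu_cblist_init(&ready);
 *    rcu_segcblist_extract_done_cbs(rsclp, &ready);
 *    // Drop the lock, then invoke without holding it:
 *    while ((rhp = rcu_cblist_dequeue(&ready)) != NULL)
 *        rhp->func(rhp);
 */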

/*
 * Extract only those callbacks still pending (not yet ready to be
 * invoked) from the specified rcu_segcblist structure and place them in
 * the specified rcu_cblist structure.  Note that this loses information
 * about any callbacks that might have been partway done waiting for
 * their grace period.  Too bad!  They will have to start over.
 */
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
                    struct rcu_cblist *rclp)
{
    int i;

    if (!rcu_segcblist_pend_cbs(rsclp))
        return; /* Nothing to do. */
    rclp->len = 0;
    *rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
    rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
    WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
    for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) {
        rclp->len += rcu_segcblist_get_seglen(rsclp, i);
        WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_DONE_TAIL]);
        rcu_segcblist_set_seglen(rsclp, i, 0);
    }
}

/*
 * Insert counts from the specified rcu_cblist structure in the
 * specified rcu_segcblist structure.
 */
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
                struct rcu_cblist *rclp)
{
    rcu_segcblist_add_len(rsclp, rclp->len);
}

/*
 * Move callbacks from the specified rcu_cblist to the beginning of the
 * done-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
                   struct rcu_cblist *rclp)
{
    int i;

    if (!rclp->head)
        return; /* No callbacks to move. */
    rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len);
    *rclp->tail = rsclp->head;
    WRITE_ONCE(rsclp->head, rclp->head);
    for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
        if (&rsclp->head == rsclp->tails[i])
            WRITE_ONCE(rsclp->tails[i], rclp->tail);
        else
            break;
    rclp->head = NULL;
    rclp->tail = &rclp->head;
}

/*
 * Move callbacks from the specified rcu_cblist to the end of the
 * new-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
                   struct rcu_cblist *rclp)
{
    if (!rclp->head)
        return; /* Nothing to do. */

    rcu_segcblist_add_seglen(rsclp, RCU_NEXT_TAIL, rclp->len);
    WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head);
    WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], rclp->tail);
}

/*
 * Advance the callbacks in the specified rcu_segcblist structure based
 * on the current value passed in for the grace-period counter.
 */
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
{
    int i, j;

    WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
    if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
        return;

    /*
     * Find all callbacks whose ->gp_seq numbers indicate that they
     * are ready to invoke, and put them into the RCU_DONE_TAIL segment.
     */
    for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
        if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
            break;
        WRITE_ONCE(rsclp->tails[RCU_DONE_TAIL], rsclp->tails[i]);
        rcu_segcblist_move_seglen(rsclp, i, RCU_DONE_TAIL);
    }

    /* If no callbacks moved, nothing more need be done. */
    if (i == RCU_WAIT_TAIL)
        return;

    /* Clean up tail pointers that might have been misordered above. */
    for (j = RCU_WAIT_TAIL; j < i; j++)
        WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]);

    /*
     * Callbacks moved, so there might be an empty RCU_WAIT_TAIL
     * and a non-empty RCU_NEXT_READY_TAIL.  If so, copy the
     * RCU_NEXT_READY_TAIL segment to fill the RCU_WAIT_TAIL gap
     * created by the now-ready-to-invoke segments.
     */
    for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
        if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
            break;  /* No more callbacks. */
        WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]);
        rcu_segcblist_move_seglen(rsclp, i, j);
        rsclp->gp_seq[j] = rsclp->gp_seq[i];
    }
}
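
/*
 * A worked example with illustrative numbers: suppose RCU_WAIT_TAIL is
 * tagged with gp_seq 100, RCU_NEXT_READY_TAIL with gp_seq 104, and this
 * function is called with seq == 100.  The WAIT callbacks move to DONE
 * (their grace period has ended), and the NEXT_READY callbacks then
 * slide forward into the vacated WAIT slot, keeping gp_seq 104:
 *
 *    rcu_segcblist_advance(rsclp, 100);
 *    // DONE: the old WAIT callbacks, now ready to invoke.
 *    // WAIT: the old NEXT_READY callbacks, waiting for gp_seq 104.
 */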

/*
 * "Accelerate" callbacks based on more-accurate grace-period information.
 * The reason for this is that RCU does not synchronize the beginnings and
 * ends of grace periods, and that callbacks are posted locally.  This in
 * turn means that the callbacks must be labelled conservatively early
 * on, as getting exact information would degrade both performance and
 * scalability.  When more accurate grace-period information becomes
 * available, previously posted callbacks can be "accelerated", marking
 * them to complete at the end of the earlier grace period.
 *
 * This function operates on an rcu_segcblist structure, and also the
 * grace-period sequence number seq at which new callbacks would become
 * ready to invoke.  Returns true if there are callbacks that won't be
 * ready to invoke until seq, false otherwise.
 */
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
{
    int i, j;

    WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
    if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
        return false;

    /*
     * Find the segment preceding the oldest segment of callbacks
     * whose ->gp_seq[] completion is at or after that passed in via
     * "seq", skipping any empty segments.  This oldest segment, along
     * with any later segments, can be merged in with any newly arrived
     * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq"
     * as their ->gp_seq[] grace-period completion sequence number.
     */
    for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--)
        if (rsclp->tails[i] != rsclp->tails[i - 1] &&
            ULONG_CMP_LT(rsclp->gp_seq[i], seq))
            break;

    /*
     * If all the segments contain callbacks that correspond to
     * earlier grace-period sequence numbers than "seq", leave.
     * Assuming that the rcu_segcblist structure has enough
     * segments in its arrays, this can only happen if some of
     * the non-done segments contain callbacks that really are
     * ready to invoke.  This situation will get straightened
     * out by the next call to rcu_segcblist_advance().
     *
     * Also advance to the oldest segment of callbacks whose
     * ->gp_seq[] completion is at or after that passed in via "seq",
     * skipping any empty segments.
     *
     * Note that segment "i" (and any lower-numbered segments
     * containing older callbacks) will be unaffected, and their
     * grace-period numbers remain unchanged.  For example, if i ==
     * WAIT_TAIL, then neither WAIT_TAIL nor DONE_TAIL will be touched.
     * Instead, the CBs in NEXT_TAIL will be merged with those in
     * NEXT_READY_TAIL and the grace-period number of NEXT_READY_TAIL
     * would be updated.  NEXT_TAIL would then be empty.
     */
    if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL)
        return false;

    /* Accounting: everything below i is about to get merged into i. */
    for (j = i + 1; j <= RCU_NEXT_TAIL; j++)
        rcu_segcblist_move_seglen(rsclp, j, i);

    /*
     * Merge all later callbacks, including newly arrived callbacks,
     * into the segment located by the for-loop above.  Assign "seq"
     * as the ->gp_seq[] value in order to correctly handle the case
     * where there were no pending callbacks in the rcu_segcblist
     * structure other than in the RCU_NEXT_TAIL segment.
     */
    for (; i < RCU_NEXT_TAIL; i++) {
        WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_NEXT_TAIL]);
        rsclp->gp_seq[i] = seq;
    }
    return true;
}
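
/*
 * A typical caller pairs advance and accelerate (illustrative only;
 * cur_gp_seq and next_gp_seq are hypothetical values obtained from the
 * grace-period machinery, and request_grace_period() is a hypothetical
 * helper):
 *
 *    rcu_segcblist_advance(rsclp, cur_gp_seq);    // Reclassify by current GP.
 *    if (rcu_segcblist_accelerate(rsclp, next_gp_seq))
 *        request_grace_period(next_gp_seq);
 */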

/*
 * Merge the source rcu_segcblist structure into the destination
 * rcu_segcblist structure, then initialize the source.  Any pending
 * callbacks from the source get to start over.  It is best to
 * advance and accelerate both the destination and the source
 * before merging.
 */
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
             struct rcu_segcblist *src_rsclp)
{
    struct rcu_cblist donecbs;
    struct rcu_cblist pendcbs;

    lockdep_assert_cpus_held();

    rcu_cblist_init(&donecbs);
    rcu_cblist_init(&pendcbs);

    rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs);
    rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs);

    /*
     * No need for smp_mb() before setting the length to 0, because the
     * CPU-hotplug lock excludes rcu_barrier().
     */
    rcu_segcblist_set_len(src_rsclp, 0);

    rcu_segcblist_insert_count(dst_rsclp, &donecbs);
    rcu_segcblist_insert_count(dst_rsclp, &pendcbs);
    rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs);
    rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs);

    rcu_segcblist_init(src_rsclp);
}