0001 /* SPDX-License-Identifier: GPL-2.0+ */
0002 /*
0003  * RCU expedited grace periods
0004  *
0005  * Copyright IBM Corporation, 2016
0006  *
0007  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
0008  */
0009 
0010 #include <linux/lockdep.h>
0011 
0012 static void rcu_exp_handler(void *unused);
0013 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
0014 
0015 /*
0016  * Record the start of an expedited grace period.
0017  */
0018 static void rcu_exp_gp_seq_start(void)
0019 {
0020     rcu_seq_start(&rcu_state.expedited_sequence);
0021     rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
0022 }
0023 
0024 /*
0025  * Return the value that the expedited-grace-period counter will have
0026  * at the end of the current grace period.
0027  */
0028 static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
0029 {
0030     return rcu_seq_endval(&rcu_state.expedited_sequence);
0031 }
0032 
0033 /*
0034  * Record the end of an expedited grace period.
0035  */
0036 static void rcu_exp_gp_seq_end(void)
0037 {
0038     rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
0039     rcu_seq_end(&rcu_state.expedited_sequence);
0040     smp_mb(); /* Ensure that consecutive grace periods serialize. */
0041 }
0042 
0043 /*
0044  * Take a snapshot of the expedited-grace-period counter, which is the
0045  * earliest value that will indicate that a full grace period has
0046  * elapsed since the current time.
0047  */
0048 static unsigned long rcu_exp_gp_seq_snap(void)
0049 {
0050     unsigned long s;
0051 
0052     smp_mb(); /* Caller's modifications seen first by other CPUs. */
0053     s = rcu_seq_snap(&rcu_state.expedited_sequence);
0054     trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
0055     return s;
0056 }
0057 
0058 /*
0059  * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
0060  * if a full expedited grace period has elapsed since that snapshot
0061  * was taken.
0062  */
0063 static bool rcu_exp_gp_seq_done(unsigned long s)
0064 {
0065     return rcu_seq_done(&rcu_state.expedited_sequence, s);
0066 }
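
/*
 * Illustrative sketch, not part of the kernel source: a standalone,
 * single-threaded model of the sequence-counter protocol that
 * rcu_exp_gp_seq_snap()/rcu_exp_gp_seq_done() build on.  The low two bits
 * carry "grace period in progress" state and the upper bits count completed
 * grace periods; the constants follow kernel/rcu/rcu.h, but the real code's
 * memory barriers, locking, and tracing are omitted.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define SEQ_CTR_SHIFT	2
#define SEQ_STATE_MASK	((1UL << SEQ_CTR_SHIFT) - 1)

static unsigned long seq;	/* models rcu_state.expedited_sequence */

static void seq_start(void) { seq++; }				/* GP now in progress */
static void seq_end(void) { seq = (seq | SEQ_STATE_MASK) + 1; }	/* GP complete */

/* Earliest counter value proving that a full GP elapsed after this call. */
static unsigned long seq_snap(void)
{
	return (seq + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

/* Wrap-tolerant "has the counter reached the snapshot?" test. */
static bool seq_done(unsigned long s)
{
	return seq - s <= ULONG_MAX / 2;
}

int main(void)
{
	unsigned long s = seq_snap();			/* snapshot, no GP running */

	printf("before GP: %d\n", seq_done(s));		/* 0 */
	seq_start();
	printf("during GP: %d\n", seq_done(s));		/* 0 */
	seq_end();
	printf("after GP:  %d\n", seq_done(s));		/* 1 */
	return 0;
}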
0067 
0068 /*
0069  * Reset the ->expmaskinit values in the rcu_node tree to reflect any
0070  * recent CPU-online activity.  Note that these masks are not cleared
0071  * when CPUs go offline, so they reflect the union of all CPUs that have
0072  * ever been online.  This means that this function normally takes its
0073  * no-work-to-do fastpath.
0074  */
0075 static void sync_exp_reset_tree_hotplug(void)
0076 {
0077     bool done;
0078     unsigned long flags;
0079     unsigned long mask;
0080     unsigned long oldmask;
0081     int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
0082     struct rcu_node *rnp;
0083     struct rcu_node *rnp_up;
0084 
0085     /* If no new CPUs onlined since last time, nothing to do. */
0086     if (likely(ncpus == rcu_state.ncpus_snap))
0087         return;
0088     rcu_state.ncpus_snap = ncpus;
0089 
0090     /*
0091      * Each pass through the following loop propagates newly onlined
0092      * CPUs for the current rcu_node structure up the rcu_node tree.
0093      */
0094     rcu_for_each_leaf_node(rnp) {
0095         raw_spin_lock_irqsave_rcu_node(rnp, flags);
0096         if (rnp->expmaskinit == rnp->expmaskinitnext) {
0097             raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0098             continue;  /* No new CPUs, nothing to do. */
0099         }
0100 
0101         /* Update this node's mask, track old value for propagation. */
0102         oldmask = rnp->expmaskinit;
0103         rnp->expmaskinit = rnp->expmaskinitnext;
0104         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0105 
0106         /* If it was already nonzero, nothing to propagate. */
0107         if (oldmask)
0108             continue;
0109 
0110         /* Propagate the new CPU up the tree. */
0111         mask = rnp->grpmask;
0112         rnp_up = rnp->parent;
0113         done = false;
0114         while (rnp_up) {
0115             raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
0116             if (rnp_up->expmaskinit)
0117                 done = true;
0118             rnp_up->expmaskinit |= mask;
0119             raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
0120             if (done)
0121                 break;
0122             mask = rnp_up->grpmask;
0123             rnp_up = rnp_up->parent;
0124         }
0125     }
0126 }
0127 
0128 /*
0129  * Reset the ->expmask values in the rcu_node tree in preparation for
0130  * a new expedited grace period.
0131  */
0132 static void __maybe_unused sync_exp_reset_tree(void)
0133 {
0134     unsigned long flags;
0135     struct rcu_node *rnp;
0136 
0137     sync_exp_reset_tree_hotplug();
0138     rcu_for_each_node_breadth_first(rnp) {
0139         raw_spin_lock_irqsave_rcu_node(rnp, flags);
0140         WARN_ON_ONCE(rnp->expmask);
0141         WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
0142         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0143     }
0144 }
0145 
0146 /*
0147  * Return true if there is no RCU expedited grace period in progress
0148  * for the specified rcu_node structure, in other words, if all CPUs and
0149  * tasks covered by the specified rcu_node structure have done their bit
0150  * for the current expedited grace period.
0151  */
0152 static bool sync_rcu_exp_done(struct rcu_node *rnp)
0153 {
0154     raw_lockdep_assert_held_rcu_node(rnp);
0155     return READ_ONCE(rnp->exp_tasks) == NULL &&
0156            READ_ONCE(rnp->expmask) == 0;
0157 }
0158 
0159 /*
0160  * Like sync_rcu_exp_done(), but where the caller does not hold the
0161  * rcu_node's ->lock.
0162  */
0163 static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
0164 {
0165     unsigned long flags;
0166     bool ret;
0167 
0168     raw_spin_lock_irqsave_rcu_node(rnp, flags);
0169     ret = sync_rcu_exp_done(rnp);
0170     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0171 
0172     return ret;
0173 }
0174 
0175 
0176 /*
0177  * Report the exit from RCU read-side critical section for the last task
0178  * that queued itself during or before the current expedited preemptible-RCU
0179  * grace period.  This event is reported either to the rcu_node structure on
0180  * which the task was queued or to one of that rcu_node structure's ancestors,
0181  * recursively up the tree.  (Calm down, calm down, we do the recursion
0182  * iteratively!)
0183  */
0184 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
0185                  bool wake, unsigned long flags)
0186     __releases(rnp->lock)
0187 {
0188     unsigned long mask;
0189 
0190     raw_lockdep_assert_held_rcu_node(rnp);
0191     for (;;) {
0192         if (!sync_rcu_exp_done(rnp)) {
0193             if (!rnp->expmask)
0194                 rcu_initiate_boost(rnp, flags);
0195             else
0196                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0197             break;
0198         }
0199         if (rnp->parent == NULL) {
0200             raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0201             if (wake) {
0202                 smp_mb(); /* EGP done before wake_up(). */
0203                 swake_up_one(&rcu_state.expedited_wq);
0204             }
0205             break;
0206         }
0207         mask = rnp->grpmask;
0208         raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
0209         rnp = rnp->parent;
0210         raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
0211         WARN_ON_ONCE(!(rnp->expmask & mask));
0212         WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
0213     }
0214 }
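
/*
 * Illustrative sketch, not part of the kernel source: a standalone,
 * single-threaded model of the upward propagation that
 * __rcu_report_exp_rnp() performs.  Clearing the last bit in a leaf's mask
 * clears that leaf's bit in its parent, and emptying the root is what would
 * wake the expedited-GP waiter.  Locking, blocked-task lists, and the real
 * wakeup are omitted, and the two-level tree is made up for the demo.
 */
#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long mask;	/* models rnp->expmask */
	unsigned long grpmask;	/* this node's bit in parent->mask */
};

/* Report a quiescent state for @bit under @leaf, propagating upward. */
static void report_exp(struct node *leaf, unsigned long bit)
{
	struct node *np = leaf;

	np->mask &= ~bit;
	while (!np->mask) {
		if (!np->parent) {
			printf("expedited GP done: wake up the waiter\n");
			return;
		}
		np->parent->mask &= ~np->grpmask;
		np = np->parent;
	}
}

int main(void)
{
	struct node root = { .parent = NULL, .mask = 0x3 };
	struct node leaf0 = { .parent = &root, .mask = 0x3, .grpmask = 0x1 };
	struct node leaf1 = { .parent = &root, .mask = 0x1, .grpmask = 0x2 };

	report_exp(&leaf0, 0x1);	/* leaf0 still has another CPU pending */
	report_exp(&leaf1, 0x1);	/* empties leaf1, clears its bit in root */
	report_exp(&leaf0, 0x2);	/* empties leaf0 and then the root: done */
	return 0;
}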
0215 
0216 /*
0217  * Report expedited quiescent state for specified node.  This is a
0218  * lock-acquisition wrapper function for __rcu_report_exp_rnp().
0219  */
0220 static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
0221 {
0222     unsigned long flags;
0223 
0224     raw_spin_lock_irqsave_rcu_node(rnp, flags);
0225     __rcu_report_exp_rnp(rnp, wake, flags);
0226 }
0227 
0228 /*
0229  * Report expedited quiescent state for multiple CPUs, all covered by the
0230  * specified leaf rcu_node structure.
0231  */
0232 static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
0233                     unsigned long mask, bool wake)
0234 {
0235     int cpu;
0236     unsigned long flags;
0237     struct rcu_data *rdp;
0238 
0239     raw_spin_lock_irqsave_rcu_node(rnp, flags);
0240     if (!(rnp->expmask & mask)) {
0241         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0242         return;
0243     }
0244     WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
0245     for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
0246         rdp = per_cpu_ptr(&rcu_data, cpu);
0247         if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
0248             continue;
0249         rdp->rcu_forced_tick_exp = false;
0250         tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
0251     }
0252     __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
0253 }
0254 
0255 /*
0256  * Report expedited quiescent state for specified rcu_data (CPU).
0257  */
0258 static void rcu_report_exp_rdp(struct rcu_data *rdp)
0259 {
0260     WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
0261     rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
0262 }
0263 
0264 /* Common code for work-done checking. */
0265 static bool sync_exp_work_done(unsigned long s)
0266 {
0267     if (rcu_exp_gp_seq_done(s)) {
0268         trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
0269         smp_mb(); /* Ensure test happens before caller kfree(). */
0270         return true;
0271     }
0272     return false;
0273 }
0274 
0275 /*
0276  * Funnel-lock acquisition for expedited grace periods.  Returns true
0277  * if some other task completed an expedited grace period that this task
0278  * can piggy-back on, and with no mutex held.  Otherwise, returns false
0279  * with the mutex held, indicating that the caller must actually do the
0280  * expedited grace period.
0281  */
0282 static bool exp_funnel_lock(unsigned long s)
0283 {
0284     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
0285     struct rcu_node *rnp = rdp->mynode;
0286     struct rcu_node *rnp_root = rcu_get_root();
0287 
0288     /* Low-contention fastpath. */
0289     if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
0290         (rnp == rnp_root ||
0291          ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
0292         mutex_trylock(&rcu_state.exp_mutex))
0293         goto fastpath;
0294 
0295     /*
0296      * Each pass through the following loop works its way up
0297      * the rcu_node tree, returning if others have done the work, or
0298      * otherwise falling through to acquire ->exp_mutex.  The mapping
0299      * from CPU to rcu_node structure can be inexact, as it is just
0300      * promoting locality and is not strictly needed for correctness.
0301      */
0302     for (; rnp != NULL; rnp = rnp->parent) {
0303         if (sync_exp_work_done(s))
0304             return true;
0305 
0306         /* Work not done, either wait here or go up. */
0307         spin_lock(&rnp->exp_lock);
0308         if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
0309 
0310             /* Someone else doing GP, so wait for them. */
0311             spin_unlock(&rnp->exp_lock);
0312             trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
0313                           rnp->grplo, rnp->grphi,
0314                           TPS("wait"));
0315             wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
0316                    sync_exp_work_done(s));
0317             return true;
0318         }
0319         WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
0320         spin_unlock(&rnp->exp_lock);
0321         trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
0322                       rnp->grplo, rnp->grphi, TPS("nxtlvl"));
0323     }
0324     mutex_lock(&rcu_state.exp_mutex);
0325 fastpath:
0326     if (sync_exp_work_done(s)) {
0327         mutex_unlock(&rcu_state.exp_mutex);
0328         return true;
0329     }
0330     rcu_exp_gp_seq_start();
0331     trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
0332     return false;
0333 }
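
/*
 * Illustrative sketch, not part of the kernel source: exp_funnel_lock()
 * compares sequence numbers with ULONG_CMP_LT()/ULONG_CMP_GE(), which the
 * kernel defines (in kernel/rcu/rcu.h) as wrap-tolerant modular comparisons
 * along the lines shown here.  A plain "<" would misjudge ordering once the
 * counter wraps; the modular form does not.
 */
#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long old_snap = ULONG_MAX - 2;	/* snapshot taken just before wrap */
	unsigned long cur_seq = 4;		/* counter has since wrapped past 0 */

	printf("plain <      : %d\n", cur_seq < old_snap);		/* 1: wrongly "older" */
	printf("ULONG_CMP_LT : %d\n", ULONG_CMP_LT(cur_seq, old_snap));	/* 0: correctly newer */
	printf("ULONG_CMP_GE : %d\n", ULONG_CMP_GE(cur_seq, old_snap));	/* 1: correctly newer */
	return 0;
}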
0334 
0335 /*
0336  * Select the CPUs within the specified rcu_node that the upcoming
0337  * expedited grace period needs to wait for.
0338  */
0339 static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
0340 {
0341     int cpu;
0342     unsigned long flags;
0343     unsigned long mask_ofl_test;
0344     unsigned long mask_ofl_ipi;
0345     int ret;
0346     struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
0347 
0348     raw_spin_lock_irqsave_rcu_node(rnp, flags);
0349 
0350     /* Each pass checks a CPU for identity, offline, and idle. */
0351     mask_ofl_test = 0;
0352     for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
0353         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
0354         unsigned long mask = rdp->grpmask;
0355         int snap;
0356 
0357         if (raw_smp_processor_id() == cpu ||
0358             !(rnp->qsmaskinitnext & mask)) {
0359             mask_ofl_test |= mask;
0360         } else {
0361             snap = rcu_dynticks_snap(cpu);
0362             if (rcu_dynticks_in_eqs(snap))
0363                 mask_ofl_test |= mask;
0364             else
0365                 rdp->exp_dynticks_snap = snap;
0366         }
0367     }
0368     mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
0369 
0370     /*
0371      * Need to wait for any blocked tasks as well.  Note that
0372      * additional blocking tasks will also block the expedited GP
0373      * until such time as the ->expmask bits are cleared.
0374      */
0375     if (rcu_preempt_has_tasks(rnp))
0376         WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
0377     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0378 
0379     /* IPI the remaining CPUs for expedited quiescent state. */
0380     for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
0381         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
0382         unsigned long mask = rdp->grpmask;
0383 
0384 retry_ipi:
0385         if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
0386             mask_ofl_test |= mask;
0387             continue;
0388         }
0389         if (get_cpu() == cpu) {
0390             mask_ofl_test |= mask;
0391             put_cpu();
0392             continue;
0393         }
0394         ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
0395         put_cpu();
0396         /* The CPU will report the QS in response to the IPI. */
0397         if (!ret)
0398             continue;
0399 
0400         /* Failed, raced with CPU hotplug operation. */
0401         raw_spin_lock_irqsave_rcu_node(rnp, flags);
0402         if ((rnp->qsmaskinitnext & mask) &&
0403             (rnp->expmask & mask)) {
0404             /* Online, so delay for a bit and try again. */
0405             raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0406             trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
0407             schedule_timeout_idle(1);
0408             goto retry_ipi;
0409         }
0410         /* CPU really is offline, so we must report its QS. */
0411         if (rnp->expmask & mask)
0412             mask_ofl_test |= mask;
0413         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0414     }
0415     /* Report quiescent states for those that went offline. */
0416     if (mask_ofl_test)
0417         rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
0418 }
0419 
0420 static void rcu_exp_sel_wait_wake(unsigned long s);
0421 
0422 #ifdef CONFIG_RCU_EXP_KTHREAD
0423 static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
0424 {
0425     struct rcu_exp_work *rewp =
0426         container_of(wp, struct rcu_exp_work, rew_work);
0427 
0428     __sync_rcu_exp_select_node_cpus(rewp);
0429 }
0430 
0431 static inline bool rcu_gp_par_worker_started(void)
0432 {
0433     return !!READ_ONCE(rcu_exp_par_gp_kworker);
0434 }
0435 
0436 static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
0437 {
0438     kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
0439     /*
0440      * Use rcu_exp_par_gp_kworker, because flushing a work item from
0441      * another work item on the same kthread worker can result in
0442      * deadlock.
0443      */
0444     kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
0445 }
0446 
0447 static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
0448 {
0449     kthread_flush_work(&rnp->rew.rew_work);
0450 }
0451 
0452 /*
0453  * Work-queue handler to drive an expedited grace period forward.
0454  */
0455 static void wait_rcu_exp_gp(struct kthread_work *wp)
0456 {
0457     struct rcu_exp_work *rewp;
0458 
0459     rewp = container_of(wp, struct rcu_exp_work, rew_work);
0460     rcu_exp_sel_wait_wake(rewp->rew_s);
0461 }
0462 
0463 static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
0464 {
0465     kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
0466     kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
0467 }
0468 
0469 static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
0470 {
0471 }
0472 #else /* !CONFIG_RCU_EXP_KTHREAD */
0473 static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
0474 {
0475     struct rcu_exp_work *rewp =
0476         container_of(wp, struct rcu_exp_work, rew_work);
0477 
0478     __sync_rcu_exp_select_node_cpus(rewp);
0479 }
0480 
0481 static inline bool rcu_gp_par_worker_started(void)
0482 {
0483     return !!READ_ONCE(rcu_par_gp_wq);
0484 }
0485 
0486 static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
0487 {
0488     int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
0489 
0490     INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
0491     /* If all offline, queue the work on an unbound CPU. */
0492     if (unlikely(cpu > rnp->grphi - rnp->grplo))
0493         cpu = WORK_CPU_UNBOUND;
0494     else
0495         cpu += rnp->grplo;
0496     queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
0497 }
0498 
0499 static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
0500 {
0501     flush_work(&rnp->rew.rew_work);
0502 }
0503 
0504 /*
0505  * Work-queue handler to drive an expedited grace period forward.
0506  */
0507 static void wait_rcu_exp_gp(struct work_struct *wp)
0508 {
0509     struct rcu_exp_work *rewp;
0510 
0511     rewp = container_of(wp, struct rcu_exp_work, rew_work);
0512     rcu_exp_sel_wait_wake(rewp->rew_s);
0513 }
0514 
0515 static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
0516 {
0517     INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
0518     queue_work(rcu_gp_wq, &rew->rew_work);
0519 }
0520 
0521 static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
0522 {
0523     destroy_work_on_stack(&rew->rew_work);
0524 }
0525 #endif /* CONFIG_RCU_EXP_KTHREAD */
0526 
0527 /*
0528  * Select the nodes that the upcoming expedited grace period needs
0529  * to wait for.
0530  */
0531 static void sync_rcu_exp_select_cpus(void)
0532 {
0533     struct rcu_node *rnp;
0534 
0535     trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
0536     sync_exp_reset_tree();
0537     trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
0538 
0539     /* Schedule work for each leaf rcu_node structure. */
0540     rcu_for_each_leaf_node(rnp) {
0541         rnp->exp_need_flush = false;
0542         if (!READ_ONCE(rnp->expmask))
0543             continue; /* Avoid early boot non-existent wq. */
0544         if (!rcu_gp_par_worker_started() ||
0545             rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
0546             rcu_is_last_leaf_node(rnp)) {
0547             /* No worker started yet or last leaf, do direct call. */
0548             sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
0549             continue;
0550         }
0551         sync_rcu_exp_select_cpus_queue_work(rnp);
0552         rnp->exp_need_flush = true;
0553     }
0554 
0555     /* Wait for jobs (if any) to complete. */
0556     rcu_for_each_leaf_node(rnp)
0557         if (rnp->exp_need_flush)
0558             sync_rcu_exp_select_cpus_flush_work(rnp);
0559 }
0560 
0561 /*
0562  * Wait for the expedited grace period to elapse, within time limit.
0563  * If the time limit is exceeded without the grace period elapsing,
0564  * return false, otherwise return true.
0565  */
0566 static bool synchronize_rcu_expedited_wait_once(long tlimit)
0567 {
0568     int t;
0569     struct rcu_node *rnp_root = rcu_get_root();
0570 
0571     t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
0572                       sync_rcu_exp_done_unlocked(rnp_root),
0573                       tlimit);
0574     // Workqueues should not be signaled.
0575     if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
0576         return true;
0577     WARN_ON(t < 0);  /* workqueues should not be signaled. */
0578     return false;
0579 }
0580 
0581 /*
0582  * Wait for the expedited grace period to elapse, issuing any needed
0583  * RCU CPU stall warnings along the way.
0584  */
0585 static void synchronize_rcu_expedited_wait(void)
0586 {
0587     int cpu;
0588     unsigned long j;
0589     unsigned long jiffies_stall;
0590     unsigned long jiffies_start;
0591     unsigned long mask;
0592     int ndetected;
0593     struct rcu_data *rdp;
0594     struct rcu_node *rnp;
0595     struct rcu_node *rnp_root = rcu_get_root();
0596 
0597     trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
0598     jiffies_stall = rcu_exp_jiffies_till_stall_check();
0599     jiffies_start = jiffies;
0600     if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
0601         if (synchronize_rcu_expedited_wait_once(1))
0602             return;
0603         rcu_for_each_leaf_node(rnp) {
0604             mask = READ_ONCE(rnp->expmask);
0605             for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
0606                 rdp = per_cpu_ptr(&rcu_data, cpu);
0607                 if (rdp->rcu_forced_tick_exp)
0608                     continue;
0609                 rdp->rcu_forced_tick_exp = true;
0610                 preempt_disable();
0611                 if (cpu_online(cpu))
0612                     tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
0613                 preempt_enable();
0614             }
0615         }
0616         j = READ_ONCE(jiffies_till_first_fqs);
0617         if (synchronize_rcu_expedited_wait_once(j + HZ))
0618             return;
0619     }
0620 
0621     for (;;) {
0622         if (synchronize_rcu_expedited_wait_once(jiffies_stall))
0623             return;
0624         if (rcu_stall_is_suppressed())
0625             continue;
0626         trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
0627         pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
0628                rcu_state.name);
0629         ndetected = 0;
0630         rcu_for_each_leaf_node(rnp) {
0631             ndetected += rcu_print_task_exp_stall(rnp);
0632             for_each_leaf_node_possible_cpu(rnp, cpu) {
0633                 struct rcu_data *rdp;
0634 
0635                 mask = leaf_node_cpu_bit(rnp, cpu);
0636                 if (!(READ_ONCE(rnp->expmask) & mask))
0637                     continue;
0638                 ndetected++;
0639                 rdp = per_cpu_ptr(&rcu_data, cpu);
0640                 pr_cont(" %d-%c%c%c%c", cpu,
0641                     "O."[!!cpu_online(cpu)],
0642                     "o."[!!(rdp->grpmask & rnp->expmaskinit)],
0643                     "N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
0644                     "D."[!!(rdp->cpu_no_qs.b.exp)]);
0645             }
0646         }
0647         pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
0648             jiffies - jiffies_start, rcu_state.expedited_sequence,
0649             data_race(rnp_root->expmask),
0650             ".T"[!!data_race(rnp_root->exp_tasks)]);
0651         if (ndetected) {
0652             pr_err("blocking rcu_node structures (internal RCU debug):");
0653             rcu_for_each_node_breadth_first(rnp) {
0654                 if (rnp == rnp_root)
0655                     continue; /* printed unconditionally */
0656                 if (sync_rcu_exp_done_unlocked(rnp))
0657                     continue;
0658                 pr_cont(" l=%u:%d-%d:%#lx/%c",
0659                     rnp->level, rnp->grplo, rnp->grphi,
0660                     data_race(rnp->expmask),
0661                     ".T"[!!data_race(rnp->exp_tasks)]);
0662             }
0663             pr_cont("\n");
0664         }
0665         rcu_for_each_leaf_node(rnp) {
0666             for_each_leaf_node_possible_cpu(rnp, cpu) {
0667                 mask = leaf_node_cpu_bit(rnp, cpu);
0668                 if (!(READ_ONCE(rnp->expmask) & mask))
0669                     continue;
0670                 dump_cpu_task(cpu);
0671             }
0672         }
0673         jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
0674         panic_on_rcu_stall();
0675     }
0676 }
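
/*
 * Illustrative sketch, not part of the kernel source: the stall printout
 * above selects characters with expressions such as "O."[!!cpu_online(cpu)].
 * A string literal is an array, so indexing it with 0 or 1 picks the first
 * or second character; the letter flags the condition being false (for
 * example 'O' for a CPU that is not online) and '.' marks the normal case.
 */
#include <stdio.h>

int main(void)
{
	int online = 0;

	printf("%c\n", "O."[!!online]);	/* 'O': CPU not online */
	online = 1;
	printf("%c\n", "O."[!!online]);	/* '.': nothing unusual to report */
	return 0;
}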
0677 
0678 /*
0679  * Wait for the current expedited grace period to complete, and then
0680  * wake up everyone who piggybacked on the just-completed expedited
0681  * grace period.  Also update all the ->exp_seq_rq counters as needed
0682  * in order to avoid counter-wrap problems.
0683  */
0684 static void rcu_exp_wait_wake(unsigned long s)
0685 {
0686     struct rcu_node *rnp;
0687 
0688     synchronize_rcu_expedited_wait();
0689 
0690     // Switch over to wakeup mode, allowing the next GP to proceed.
0691     // End the previous grace period only after acquiring the mutex
0692     // to ensure that only one GP runs concurrently with wakeups.
0693     mutex_lock(&rcu_state.exp_wake_mutex);
0694     rcu_exp_gp_seq_end();
0695     trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
0696 
0697     rcu_for_each_node_breadth_first(rnp) {
0698         if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
0699             spin_lock(&rnp->exp_lock);
0700             /* Recheck, avoid hang in case someone just arrived. */
0701             if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
0702                 WRITE_ONCE(rnp->exp_seq_rq, s);
0703             spin_unlock(&rnp->exp_lock);
0704         }
0705         smp_mb(); /* All above changes before wakeup. */
0706         wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
0707     }
0708     trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
0709     mutex_unlock(&rcu_state.exp_wake_mutex);
0710 }
0711 
0712 /*
0713  * Common code to drive an expedited grace period forward, used by
0714  * workqueues and mid-boot-time tasks.
0715  */
0716 static void rcu_exp_sel_wait_wake(unsigned long s)
0717 {
0718     /* Initialize the rcu_node tree in preparation for the wait. */
0719     sync_rcu_exp_select_cpus();
0720 
0721     /* Wait and clean up, including waking everyone. */
0722     rcu_exp_wait_wake(s);
0723 }
0724 
0725 #ifdef CONFIG_PREEMPT_RCU
0726 
0727 /*
0728  * Remote handler for smp_call_function_single().  If there is an
0729  * RCU read-side critical section in effect, request that the
0730  * next rcu_read_unlock() record the quiescent state up the
0731  * ->expmask fields in the rcu_node tree.  Otherwise, immediately
0732  * report the quiescent state.
0733  */
0734 static void rcu_exp_handler(void *unused)
0735 {
0736     int depth = rcu_preempt_depth();
0737     unsigned long flags;
0738     struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
0739     struct rcu_node *rnp = rdp->mynode;
0740     struct task_struct *t = current;
0741 
0742     /*
0743      * First, the common case of not being in an RCU read-side
0744      * critical section.  If preemption is also enabled or this CPU was
0745      * interrupted from idle, report the quiescent state now; otherwise defer.
0746      */
0747     if (!depth) {
0748         if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
0749             rcu_is_cpu_rrupt_from_idle()) {
0750             rcu_report_exp_rdp(rdp);
0751         } else {
0752             WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
0753             set_tsk_need_resched(t);
0754             set_preempt_need_resched();
0755         }
0756         return;
0757     }
0758 
0759     /*
0760      * Second, the less-common case of being in an RCU read-side
0761      * critical section.  In this case we can count on a future
0762      * rcu_read_unlock().  However, this rcu_read_unlock() might
0763      * execute on some other CPU, but in that case there will be
0764      * a future context switch.  Either way, if the expedited
0765      * grace period is still waiting on this CPU, set ->deferred_qs
0766      * so that the eventual quiescent state will be reported.
0767      * Note that there is a large group of race conditions that
0768      * can have caused this quiescent state to already have been
0769      * reported, so we really do need to check ->expmask.
0770      */
0771     if (depth > 0) {
0772         raw_spin_lock_irqsave_rcu_node(rnp, flags);
0773         if (rnp->expmask & rdp->grpmask) {
0774             WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
0775             t->rcu_read_unlock_special.b.exp_hint = true;
0776         }
0777         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0778         return;
0779     }
0780 
0781     // Finally, negative nesting depth should not happen.
0782     WARN_ON_ONCE(1);
0783 }
0784 
0785 /* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
0786 static void sync_sched_exp_online_cleanup(int cpu)
0787 {
0788 }
0789 
0790 /*
0791  * Scan the current list of tasks blocked within RCU read-side critical
0792  * sections, printing out the tid of each that is blocking the current
0793  * expedited grace period.
0794  */
0795 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
0796 {
0797     unsigned long flags;
0798     int ndetected = 0;
0799     struct task_struct *t;
0800 
0801     if (!READ_ONCE(rnp->exp_tasks))
0802         return 0;
0803     raw_spin_lock_irqsave_rcu_node(rnp, flags);
0804     t = list_entry(rnp->exp_tasks->prev,
0805                struct task_struct, rcu_node_entry);
0806     list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
0807         pr_cont(" P%d", t->pid);
0808         ndetected++;
0809     }
0810     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
0811     return ndetected;
0812 }
0813 
0814 #else /* #ifdef CONFIG_PREEMPT_RCU */
0815 
0816 /* Request an expedited quiescent state. */
0817 static void rcu_exp_need_qs(void)
0818 {
0819     __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
0820     /* Store .exp before .rcu_urgent_qs. */
0821     smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
0822     set_tsk_need_resched(current);
0823     set_preempt_need_resched();
0824 }
0825 
0826 /* Invoked on each online non-idle CPU for expedited quiescent state. */
0827 static void rcu_exp_handler(void *unused)
0828 {
0829     struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
0830     struct rcu_node *rnp = rdp->mynode;
0831 
0832     if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
0833         __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
0834         return;
0835     if (rcu_is_cpu_rrupt_from_idle()) {
0836         rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
0837         return;
0838     }
0839     rcu_exp_need_qs();
0840 }
0841 
0842 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
0843 static void sync_sched_exp_online_cleanup(int cpu)
0844 {
0845     unsigned long flags;
0846     int my_cpu;
0847     struct rcu_data *rdp;
0848     int ret;
0849     struct rcu_node *rnp;
0850 
0851     rdp = per_cpu_ptr(&rcu_data, cpu);
0852     rnp = rdp->mynode;
0853     my_cpu = get_cpu();
0854     /* Quiescent state either not needed or already requested, leave. */
0855     if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
0856         READ_ONCE(rdp->cpu_no_qs.b.exp)) {
0857         put_cpu();
0858         return;
0859     }
0860     /* Quiescent state needed on current CPU, so set it up locally. */
0861     if (my_cpu == cpu) {
0862         local_irq_save(flags);
0863         rcu_exp_need_qs();
0864         local_irq_restore(flags);
0865         put_cpu();
0866         return;
0867     }
0868     /* Quiescent state needed on some other CPU, send IPI. */
0869     ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
0870     put_cpu();
0871     WARN_ON_ONCE(ret);
0872 }
0873 
0874 /*
0875  * Because preemptible RCU does not exist, we never have to check for
0876  * tasks blocked within RCU read-side critical sections that are
0877  * blocking the current expedited grace period.
0878  */
0879 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
0880 {
0881     return 0;
0882 }
0883 
0884 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
0885 
0886 /**
0887  * synchronize_rcu_expedited - Brute-force RCU grace period
0888  *
0889  * Wait for an RCU grace period, but expedite it.  The basic idea is to
0890  * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
0891  * the CPU is in an RCU critical section, and if so, it sets a flag that
0892  * causes the outermost rcu_read_unlock() to report the quiescent state
0893  * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
0894  * other hand, if the CPU is not in an RCU read-side critical section,
0895  * the IPI handler reports the quiescent state immediately.
0896  *
0897  * Although this is a great improvement over previous expedited
0898  * implementations, it is still unfriendly to real-time workloads, and is
0899  * thus not recommended for any sort of common-case code.  In fact, if
0900  * you are using synchronize_rcu_expedited() in a loop, please restructure
0901  * your code to batch your updates, and then use a single synchronize_rcu()
0902  * instead.
0903  *
0904  * This has the same semantics as (but is more brutal than) synchronize_rcu().
0905  */
0906 void synchronize_rcu_expedited(void)
0907 {
0908     bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
0909     struct rcu_exp_work rew;
0910     struct rcu_node *rnp;
0911     unsigned long s;
0912 
0913     RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
0914              lock_is_held(&rcu_lock_map) ||
0915              lock_is_held(&rcu_sched_lock_map),
0916              "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
0917 
0918     /* Is the state such that the call is a grace period? */
0919     if (rcu_blocking_is_gp()) {
0920         // Note well that this code runs with !PREEMPT && !SMP.
0921         // In addition, all code that advances grace periods runs
0922         // at process level.  Therefore, this expedited GP overlaps
0923         // with other expedited GPs only by being fully nested within
0924         // them, which allows reuse of ->gp_seq_polled_exp_snap.
0925         rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
0926         rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
0927         if (rcu_init_invoked())
0928             cond_resched();
0929         return;  // Context allows vacuous grace periods.
0930     }
0931 
0932     /* If expedited grace periods are prohibited, fall back to normal. */
0933     if (rcu_gp_is_normal()) {
0934         wait_rcu_gp(call_rcu);
0935         return;
0936     }
0937 
0938     /* Take a snapshot of the sequence number.  */
0939     s = rcu_exp_gp_seq_snap();
0940     if (exp_funnel_lock(s))
0941         return;  /* Someone else did our work for us. */
0942 
0943     /* Ensure that load happens before action based on it. */
0944     if (unlikely(boottime)) {
0945         /* Direct call during scheduler init and early_initcalls(). */
0946         rcu_exp_sel_wait_wake(s);
0947     } else {
0948         /* Marshall arguments & schedule the expedited grace period. */
0949         rew.rew_s = s;
0950         synchronize_rcu_expedited_queue_work(&rew);
0951     }
0952 
0953     /* Wait for expedited grace period to complete. */
0954     rnp = rcu_get_root();
0955     wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
0956            sync_exp_work_done(s));
0957     smp_mb(); /* Work actions happen before return. */
0958 
0959     /* Let the next expedited grace period start. */
0960     mutex_unlock(&rcu_state.exp_mutex);
0961 
0962     if (likely(!boottime))
0963         synchronize_rcu_expedited_destroy_work(&rew);
0964 }
0965 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
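
/*
 * Illustrative usage sketch, not part of the kernel source: the kernel-doc
 * above advises against calling synchronize_rcu_expedited() in a loop.
 * Unlink everything first, wait for one grace period, then free.  The
 * struct item, shared_list, and shared_lock names are hypothetical.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;		/* linkage in the RCU-protected list */
	struct list_head doomed_node;	/* update-side-only linkage for freeing */
	int key;
};

static LIST_HEAD(shared_list);		/* readers traverse under rcu_read_lock() */
static DEFINE_SPINLOCK(shared_lock);	/* serializes updaters */

static void remove_key(int key)
{
	struct item *p, *n;
	LIST_HEAD(doomed);

	spin_lock(&shared_lock);
	list_for_each_entry_safe(p, n, &shared_list, node) {
		if (p->key != key)
			continue;
		list_del_rcu(&p->node);			/* readers may still see p */
		list_add(&p->doomed_node, &doomed);	/* remember it for freeing */
	}
	spin_unlock(&shared_lock);

	/*
	 * One grace period covers every removal above.  Calling
	 * synchronize_rcu_expedited() once per item inside the loop would be
	 * correct but would IPI every CPU for each and every item.
	 */
	synchronize_rcu_expedited();

	list_for_each_entry_safe(p, n, &doomed, doomed_node)
		kfree(p);
}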
0966 
0967 /*
0968  * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
0969  * RCU grace periods that it needs.
0970  */
0971 static void sync_rcu_do_polled_gp(struct work_struct *wp)
0972 {
0973     unsigned long flags;
0974     int i = 0;
0975     struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
0976     unsigned long s;
0977 
0978     raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
0979     s = rnp->exp_seq_poll_rq;
0980     rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
0981     raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
0982     if (s == RCU_GET_STATE_COMPLETED)
0983         return;
0984     while (!poll_state_synchronize_rcu(s)) {
0985         synchronize_rcu_expedited();
0986         if (i == 10 || i == 20)
0987             pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
0988         i++;
0989     }
0990     raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
0991     s = rnp->exp_seq_poll_rq;
0992     if (poll_state_synchronize_rcu(s))
0993         rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
0994     raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
0995 }
0996 
0997 /**
0998  * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
0999  *
1000  * Returns a cookie to pass to a call to cond_synchronize_rcu(),
1001  * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
1002  * allowing them to determine whether or not any sort of grace period has
1003  * elapsed in the meantime.  If the needed expedited grace period is not
1004  * already slated to start, initiates that grace period.
1005  */
1006 unsigned long start_poll_synchronize_rcu_expedited(void)
1007 {
1008     unsigned long flags;
1009     struct rcu_data *rdp;
1010     struct rcu_node *rnp;
1011     unsigned long s;
1012 
1013     s = get_state_synchronize_rcu();
1014     rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
1015     rnp = rdp->mynode;
1016     if (rcu_init_invoked())
1017         raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
1018     if (!poll_state_synchronize_rcu(s)) {
1019         rnp->exp_seq_poll_rq = s;
1020         if (rcu_init_invoked())
1021             queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
1022     }
1023     if (rcu_init_invoked())
1024         raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
1025 
1026     return s;
1027 }
1028 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
1029 
1030 /**
1031  * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
1032  *
1033  * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
1034  *
1035  * If any type of full RCU grace period has elapsed since the earlier
1036  * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
1037  * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
1038  * invoke synchronize_rcu_expedited() to wait for a full grace period.
1039  *
1040  * Yes, this function does not take counter wrap into account.
1041  * But counter wrap is harmless.  If the counter wraps, we have waited for
1042  * more than 2 billion grace periods (and way more on a 64-bit system!),
1043  * so waiting for a couple of additional grace periods should be just fine.
1044  *
1045  * This function provides the same memory-ordering guarantees that
1046  * would be provided by a synchronize_rcu() that was invoked at the call
1047  * to the function that provided @oldstate and that returned at the end
1048  * of this function.
1049  */
1050 void cond_synchronize_rcu_expedited(unsigned long oldstate)
1051 {
1052     if (!poll_state_synchronize_rcu(oldstate))
1053         synchronize_rcu_expedited();
1054 }
1055 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
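
/*
 * Illustrative usage sketch, not part of the kernel source: pairing
 * start_poll_synchronize_rcu_expedited() with poll_state_synchronize_rcu()
 * lets an updater kick off an expedited grace period without blocking and
 * check (or wait) for it later.  The my_*() helpers and struct my_object
 * are hypothetical.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_object {
	struct list_head node;		/* linkage in an RCU-protected list */
	unsigned long rcu_cookie;	/* cookie from the polled-GP API */
};

/* Caller holds the update-side lock protecting the list. */
static void my_retire_object(struct my_object *obj)
{
	list_del_rcu(&obj->node);
	/* Start an expedited GP now, but do not wait for it here. */
	obj->rcu_cookie = start_poll_synchronize_rcu_expedited();
}

/* Opportunistic reclaim: free only if a full grace period has elapsed. */
static bool my_try_free(struct my_object *obj)
{
	if (!poll_state_synchronize_rcu(obj->rcu_cookie))
		return false;
	kfree(obj);
	return true;
}

/* Forced reclaim: block, expediting the wait only if still needed. */
static void my_free_now(struct my_object *obj)
{
	cond_synchronize_rcu_expedited(obj->rcu_cookie);
	kfree(obj);
}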