Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
0004  *
0005  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
0006  *
0007  *  Interactivity improvements by Mike Galbraith
0008  *  (C) 2007 Mike Galbraith <efault@gmx.de>
0009  *
0010  *  Various enhancements by Dmitry Adamushko.
0011  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
0012  *
0013  *  Group scheduling enhancements by Srivatsa Vaddagiri
0014  *  Copyright IBM Corporation, 2007
0015  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
0016  *
0017  *  Scaled math optimizations by Thomas Gleixner
0018  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
0019  *
0020  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
0021  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
0022  */
0023 #include <linux/energy_model.h>
0024 #include <linux/mmap_lock.h>
0025 #include <linux/hugetlb_inline.h>
0026 #include <linux/jiffies.h>
0027 #include <linux/mm_api.h>
0028 #include <linux/highmem.h>
0029 #include <linux/spinlock_api.h>
0030 #include <linux/cpumask_api.h>
0031 #include <linux/lockdep_api.h>
0032 #include <linux/softirq.h>
0033 #include <linux/refcount_api.h>
0034 #include <linux/topology.h>
0035 #include <linux/sched/clock.h>
0036 #include <linux/sched/cond_resched.h>
0037 #include <linux/sched/cputime.h>
0038 #include <linux/sched/isolation.h>
0039 #include <linux/sched/nohz.h>
0040 
0041 #include <linux/cpuidle.h>
0042 #include <linux/interrupt.h>
0043 #include <linux/mempolicy.h>
0044 #include <linux/mutex_api.h>
0045 #include <linux/profile.h>
0046 #include <linux/psi.h>
0047 #include <linux/ratelimit.h>
0048 #include <linux/task_work.h>
0049 
0050 #include <asm/switch_to.h>
0051 
0052 #include <linux/sched/cond_resched.h>
0053 
0054 #include "sched.h"
0055 #include "stats.h"
0056 #include "autogroup.h"
0057 
0058 /*
0059  * Targeted preemption latency for CPU-bound tasks:
0060  *
0061  * NOTE: this latency value is not the same as the concept of
0062  * 'timeslice length' - timeslices in CFS are of variable length
0063  * and have no persistent notion like in traditional, time-slice
0064  * based scheduling concepts.
0065  *
0066  * (to see the precise effective timeslice length of your workload,
0067  *  run vmstat and monitor the context-switches (cs) field)
0068  *
0069  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
0070  */
0071 unsigned int sysctl_sched_latency           = 6000000ULL;
0072 static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
0073 
0074 /*
0075  * The initial- and re-scaling of tunables is configurable
0076  *
0077  * Options are:
0078  *
0079  *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
0080  *   SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
0081  *   SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
0082  *
0083  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
0084  */
0085 unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
0086 
0087 /*
0088  * Minimal preemption granularity for CPU-bound tasks:
0089  *
0090  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
0091  */
0092 unsigned int sysctl_sched_min_granularity           = 750000ULL;
0093 static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
0094 
0095 /*
0096  * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
0097  * Applies only when SCHED_IDLE tasks compete with normal tasks.
0098  *
0099  * (default: 0.75 msec)
0100  */
0101 unsigned int sysctl_sched_idle_min_granularity          = 750000ULL;
0102 
0103 /*
0104  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
0105  */
0106 static unsigned int sched_nr_latency = 8;
0107 
0108 /*
0109  * After fork, child runs first. If set to 0 (default) then
0110  * parent will (try to) run first.
0111  */
0112 unsigned int sysctl_sched_child_runs_first __read_mostly;
0113 
0114 /*
0115  * SCHED_OTHER wake-up granularity.
0116  *
0117  * This option delays the preemption effects of decoupled workloads
0118  * and reduces their over-scheduling. Synchronous workloads will still
0119  * have immediate wakeup/sleep latencies.
0120  *
0121  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
0122  */
0123 unsigned int sysctl_sched_wakeup_granularity            = 1000000UL;
0124 static unsigned int normalized_sysctl_sched_wakeup_granularity  = 1000000UL;
0125 
0126 const_debug unsigned int sysctl_sched_migration_cost    = 500000UL;
0127 
0128 int sched_thermal_decay_shift;
0129 static int __init setup_sched_thermal_decay_shift(char *str)
0130 {
0131     int _shift = 0;
0132 
0133     if (kstrtoint(str, 0, &_shift))
0134         pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
0135 
0136     sched_thermal_decay_shift = clamp(_shift, 0, 10);
0137     return 1;
0138 }
0139 __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
0140 
0141 #ifdef CONFIG_SMP
0142 /*
0143  * For asym packing, by default the lower numbered CPU has higher priority.
0144  */
0145 int __weak arch_asym_cpu_priority(int cpu)
0146 {
0147     return -cpu;
0148 }
0149 
0150 /*
0151  * The margin used when comparing utilization with CPU capacity.
0152  *
0153  * (default: ~20%)
0154  */
0155 #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
0156 
0157 /*
0158  * The margin used when comparing CPU capacities.
0159  * is 'cap1' noticeably greater than 'cap2'
0160  *
0161  * (default: ~5%)
0162  */
0163 #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
0164 #endif
0165 
0166 #ifdef CONFIG_CFS_BANDWIDTH
0167 /*
0168  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
0169  * each time a cfs_rq requests quota.
0170  *
0171  * Note: in the case that the slice exceeds the runtime remaining (either due
0172  * to consumption or the quota being specified to be smaller than the slice)
0173  * we will always only issue the remaining available time.
0174  *
0175  * (default: 5 msec, units: microseconds)
0176  */
0177 static unsigned int sysctl_sched_cfs_bandwidth_slice        = 5000UL;
0178 #endif
0179 
0180 #ifdef CONFIG_SYSCTL
0181 static struct ctl_table sched_fair_sysctls[] = {
0182     {
0183         .procname       = "sched_child_runs_first",
0184         .data           = &sysctl_sched_child_runs_first,
0185         .maxlen         = sizeof(unsigned int),
0186         .mode           = 0644,
0187         .proc_handler   = proc_dointvec,
0188     },
0189 #ifdef CONFIG_CFS_BANDWIDTH
0190     {
0191         .procname       = "sched_cfs_bandwidth_slice_us",
0192         .data           = &sysctl_sched_cfs_bandwidth_slice,
0193         .maxlen         = sizeof(unsigned int),
0194         .mode           = 0644,
0195         .proc_handler   = proc_dointvec_minmax,
0196         .extra1         = SYSCTL_ONE,
0197     },
0198 #endif
0199     {}
0200 };
0201 
0202 static int __init sched_fair_sysctl_init(void)
0203 {
0204     register_sysctl_init("kernel", sched_fair_sysctls);
0205     return 0;
0206 }
0207 late_initcall(sched_fair_sysctl_init);
0208 #endif
0209 
0210 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
0211 {
0212     lw->weight += inc;
0213     lw->inv_weight = 0;
0214 }
0215 
0216 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
0217 {
0218     lw->weight -= dec;
0219     lw->inv_weight = 0;
0220 }
0221 
0222 static inline void update_load_set(struct load_weight *lw, unsigned long w)
0223 {
0224     lw->weight = w;
0225     lw->inv_weight = 0;
0226 }
0227 
0228 /*
0229  * Increase the granularity value when there are more CPUs,
0230  * because with more CPUs the 'effective latency' as visible
0231  * to users decreases. But the relationship is not linear,
0232  * so pick a second-best guess by going with the log2 of the
0233  * number of CPUs.
0234  *
0235  * This idea comes from the SD scheduler of Con Kolivas:
0236  */
0237 static unsigned int get_update_sysctl_factor(void)
0238 {
0239     unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
0240     unsigned int factor;
0241 
0242     switch (sysctl_sched_tunable_scaling) {
0243     case SCHED_TUNABLESCALING_NONE:
0244         factor = 1;
0245         break;
0246     case SCHED_TUNABLESCALING_LINEAR:
0247         factor = cpus;
0248         break;
0249     case SCHED_TUNABLESCALING_LOG:
0250     default:
0251         factor = 1 + ilog2(cpus);
0252         break;
0253     }
0254 
0255     return factor;
0256 }
0257 
0258 static void update_sysctl(void)
0259 {
0260     unsigned int factor = get_update_sysctl_factor();
0261 
0262 #define SET_SYSCTL(name) \
0263     (sysctl_##name = (factor) * normalized_sysctl_##name)
0264     SET_SYSCTL(sched_min_granularity);
0265     SET_SYSCTL(sched_latency);
0266     SET_SYSCTL(sched_wakeup_granularity);
0267 #undef SET_SYSCTL
0268 }
0269 
0270 void __init sched_init_granularity(void)
0271 {
0272     update_sysctl();
0273 }
0274 
0275 #define WMULT_CONST (~0U)
0276 #define WMULT_SHIFT 32
0277 
0278 static void __update_inv_weight(struct load_weight *lw)
0279 {
0280     unsigned long w;
0281 
0282     if (likely(lw->inv_weight))
0283         return;
0284 
0285     w = scale_load_down(lw->weight);
0286 
0287     if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
0288         lw->inv_weight = 1;
0289     else if (unlikely(!w))
0290         lw->inv_weight = WMULT_CONST;
0291     else
0292         lw->inv_weight = WMULT_CONST / w;
0293 }
0294 
0295 /*
0296  * delta_exec * weight / lw.weight
0297  *   OR
0298  * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
0299  *
0300  * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
0301  * we're guaranteed shift stays positive because inv_weight is guaranteed to
0302  * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
0303  *
0304  * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
0305  * weight/lw.weight <= 1, and therefore our shift will also be positive.
0306  */
0307 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
0308 {
0309     u64 fact = scale_load_down(weight);
0310     u32 fact_hi = (u32)(fact >> 32);
0311     int shift = WMULT_SHIFT;
0312     int fs;
0313 
0314     __update_inv_weight(lw);
0315 
0316     if (unlikely(fact_hi)) {
0317         fs = fls(fact_hi);
0318         shift -= fs;
0319         fact >>= fs;
0320     }
0321 
0322     fact = mul_u32_u32(fact, lw->inv_weight);
0323 
0324     fact_hi = (u32)(fact >> 32);
0325     if (fact_hi) {
0326         fs = fls(fact_hi);
0327         shift -= fs;
0328         fact >>= fs;
0329     }
0330 
0331     return mul_u64_u32_shr(delta_exec, fact, shift);
0332 }
0333 
0334 
0335 const struct sched_class fair_sched_class;
0336 
0337 /**************************************************************
0338  * CFS operations on generic schedulable entities:
0339  */
0340 
0341 #ifdef CONFIG_FAIR_GROUP_SCHED
0342 
0343 /* Walk up scheduling entities hierarchy */
0344 #define for_each_sched_entity(se) \
0345         for (; se; se = se->parent)
0346 
0347 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
0348 {
0349     struct rq *rq = rq_of(cfs_rq);
0350     int cpu = cpu_of(rq);
0351 
0352     if (cfs_rq->on_list)
0353         return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
0354 
0355     cfs_rq->on_list = 1;
0356 
0357     /*
0358      * Ensure we either appear before our parent (if already
0359      * enqueued) or force our parent to appear after us when it is
0360      * enqueued. The fact that we always enqueue bottom-up
0361      * reduces this to two cases and a special case for the root
0362      * cfs_rq. Furthermore, it also means that we will always reset
0363      * tmp_alone_branch either when the branch is connected
0364      * to a tree or when we reach the top of the tree
0365      */
0366     if (cfs_rq->tg->parent &&
0367         cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
0368         /*
0369          * If parent is already on the list, we add the child
0370          * just before. Thanks to circular linked property of
0371          * the list, this means to put the child at the tail
0372          * of the list that starts by parent.
0373          */
0374         list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
0375             &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
0376         /*
0377          * The branch is now connected to its tree so we can
0378          * reset tmp_alone_branch to the beginning of the
0379          * list.
0380          */
0381         rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
0382         return true;
0383     }
0384 
0385     if (!cfs_rq->tg->parent) {
0386         /*
0387          * cfs rq without parent should be put
0388          * at the tail of the list.
0389          */
0390         list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
0391             &rq->leaf_cfs_rq_list);
0392         /*
0393          * We have reach the top of a tree so we can reset
0394          * tmp_alone_branch to the beginning of the list.
0395          */
0396         rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
0397         return true;
0398     }
0399 
0400     /*
0401      * The parent has not already been added so we want to
0402      * make sure that it will be put after us.
0403      * tmp_alone_branch points to the begin of the branch
0404      * where we will add parent.
0405      */
0406     list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
0407     /*
0408      * update tmp_alone_branch to points to the new begin
0409      * of the branch
0410      */
0411     rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
0412     return false;
0413 }
0414 
0415 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
0416 {
0417     if (cfs_rq->on_list) {
0418         struct rq *rq = rq_of(cfs_rq);
0419 
0420         /*
0421          * With cfs_rq being unthrottled/throttled during an enqueue,
0422          * it can happen the tmp_alone_branch points the a leaf that
0423          * we finally want to del. In this case, tmp_alone_branch moves
0424          * to the prev element but it will point to rq->leaf_cfs_rq_list
0425          * at the end of the enqueue.
0426          */
0427         if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
0428             rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
0429 
0430         list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
0431         cfs_rq->on_list = 0;
0432     }
0433 }
0434 
0435 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
0436 {
0437     SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
0438 }
0439 
0440 /* Iterate thr' all leaf cfs_rq's on a runqueue */
0441 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)          \
0442     list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,    \
0443                  leaf_cfs_rq_list)
0444 
0445 /* Do the two (enqueued) entities belong to the same group ? */
0446 static inline struct cfs_rq *
0447 is_same_group(struct sched_entity *se, struct sched_entity *pse)
0448 {
0449     if (se->cfs_rq == pse->cfs_rq)
0450         return se->cfs_rq;
0451 
0452     return NULL;
0453 }
0454 
0455 static inline struct sched_entity *parent_entity(struct sched_entity *se)
0456 {
0457     return se->parent;
0458 }
0459 
0460 static void
0461 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
0462 {
0463     int se_depth, pse_depth;
0464 
0465     /*
0466      * preemption test can be made between sibling entities who are in the
0467      * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
0468      * both tasks until we find their ancestors who are siblings of common
0469      * parent.
0470      */
0471 
0472     /* First walk up until both entities are at same depth */
0473     se_depth = (*se)->depth;
0474     pse_depth = (*pse)->depth;
0475 
0476     while (se_depth > pse_depth) {
0477         se_depth--;
0478         *se = parent_entity(*se);
0479     }
0480 
0481     while (pse_depth > se_depth) {
0482         pse_depth--;
0483         *pse = parent_entity(*pse);
0484     }
0485 
0486     while (!is_same_group(*se, *pse)) {
0487         *se = parent_entity(*se);
0488         *pse = parent_entity(*pse);
0489     }
0490 }
0491 
0492 static int tg_is_idle(struct task_group *tg)
0493 {
0494     return tg->idle > 0;
0495 }
0496 
0497 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
0498 {
0499     return cfs_rq->idle > 0;
0500 }
0501 
0502 static int se_is_idle(struct sched_entity *se)
0503 {
0504     if (entity_is_task(se))
0505         return task_has_idle_policy(task_of(se));
0506     return cfs_rq_is_idle(group_cfs_rq(se));
0507 }
0508 
0509 #else   /* !CONFIG_FAIR_GROUP_SCHED */
0510 
0511 #define for_each_sched_entity(se) \
0512         for (; se; se = NULL)
0513 
0514 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
0515 {
0516     return true;
0517 }
0518 
0519 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
0520 {
0521 }
0522 
0523 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
0524 {
0525 }
0526 
0527 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)  \
0528         for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
0529 
0530 static inline struct sched_entity *parent_entity(struct sched_entity *se)
0531 {
0532     return NULL;
0533 }
0534 
0535 static inline void
0536 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
0537 {
0538 }
0539 
0540 static inline int tg_is_idle(struct task_group *tg)
0541 {
0542     return 0;
0543 }
0544 
0545 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
0546 {
0547     return 0;
0548 }
0549 
0550 static int se_is_idle(struct sched_entity *se)
0551 {
0552     return 0;
0553 }
0554 
0555 #endif  /* CONFIG_FAIR_GROUP_SCHED */
0556 
0557 static __always_inline
0558 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
0559 
0560 /**************************************************************
0561  * Scheduling class tree data structure manipulation methods:
0562  */
0563 
0564 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
0565 {
0566     s64 delta = (s64)(vruntime - max_vruntime);
0567     if (delta > 0)
0568         max_vruntime = vruntime;
0569 
0570     return max_vruntime;
0571 }
0572 
0573 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
0574 {
0575     s64 delta = (s64)(vruntime - min_vruntime);
0576     if (delta < 0)
0577         min_vruntime = vruntime;
0578 
0579     return min_vruntime;
0580 }
0581 
0582 static inline bool entity_before(struct sched_entity *a,
0583                 struct sched_entity *b)
0584 {
0585     return (s64)(a->vruntime - b->vruntime) < 0;
0586 }
0587 
0588 #define __node_2_se(node) \
0589     rb_entry((node), struct sched_entity, run_node)
0590 
0591 static void update_min_vruntime(struct cfs_rq *cfs_rq)
0592 {
0593     struct sched_entity *curr = cfs_rq->curr;
0594     struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
0595 
0596     u64 vruntime = cfs_rq->min_vruntime;
0597 
0598     if (curr) {
0599         if (curr->on_rq)
0600             vruntime = curr->vruntime;
0601         else
0602             curr = NULL;
0603     }
0604 
0605     if (leftmost) { /* non-empty tree */
0606         struct sched_entity *se = __node_2_se(leftmost);
0607 
0608         if (!curr)
0609             vruntime = se->vruntime;
0610         else
0611             vruntime = min_vruntime(vruntime, se->vruntime);
0612     }
0613 
0614     /* ensure we never gain time by being placed backwards. */
0615     u64_u32_store(cfs_rq->min_vruntime,
0616               max_vruntime(cfs_rq->min_vruntime, vruntime));
0617 }
0618 
0619 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
0620 {
0621     return entity_before(__node_2_se(a), __node_2_se(b));
0622 }
0623 
0624 /*
0625  * Enqueue an entity into the rb-tree:
0626  */
0627 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
0628 {
0629     rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
0630 }
0631 
0632 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
0633 {
0634     rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
0635 }
0636 
0637 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
0638 {
0639     struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
0640 
0641     if (!left)
0642         return NULL;
0643 
0644     return __node_2_se(left);
0645 }
0646 
0647 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
0648 {
0649     struct rb_node *next = rb_next(&se->run_node);
0650 
0651     if (!next)
0652         return NULL;
0653 
0654     return __node_2_se(next);
0655 }
0656 
0657 #ifdef CONFIG_SCHED_DEBUG
0658 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
0659 {
0660     struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
0661 
0662     if (!last)
0663         return NULL;
0664 
0665     return __node_2_se(last);
0666 }
0667 
0668 /**************************************************************
0669  * Scheduling class statistics methods:
0670  */
0671 
0672 int sched_update_scaling(void)
0673 {
0674     unsigned int factor = get_update_sysctl_factor();
0675 
0676     sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
0677                     sysctl_sched_min_granularity);
0678 
0679 #define WRT_SYSCTL(name) \
0680     (normalized_sysctl_##name = sysctl_##name / (factor))
0681     WRT_SYSCTL(sched_min_granularity);
0682     WRT_SYSCTL(sched_latency);
0683     WRT_SYSCTL(sched_wakeup_granularity);
0684 #undef WRT_SYSCTL
0685 
0686     return 0;
0687 }
0688 #endif
0689 
0690 /*
0691  * delta /= w
0692  */
0693 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
0694 {
0695     if (unlikely(se->load.weight != NICE_0_LOAD))
0696         delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
0697 
0698     return delta;
0699 }
0700 
0701 /*
0702  * The idea is to set a period in which each task runs once.
0703  *
0704  * When there are too many tasks (sched_nr_latency) we have to stretch
0705  * this period because otherwise the slices get too small.
0706  *
0707  * p = (nr <= nl) ? l : l*nr/nl
0708  */
0709 static u64 __sched_period(unsigned long nr_running)
0710 {
0711     if (unlikely(nr_running > sched_nr_latency))
0712         return nr_running * sysctl_sched_min_granularity;
0713     else
0714         return sysctl_sched_latency;
0715 }
0716 
0717 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
0718 
0719 /*
0720  * We calculate the wall-time slice from the period by taking a part
0721  * proportional to the weight.
0722  *
0723  * s = p*P[w/rw]
0724  */
0725 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
0726 {
0727     unsigned int nr_running = cfs_rq->nr_running;
0728     struct sched_entity *init_se = se;
0729     unsigned int min_gran;
0730     u64 slice;
0731 
0732     if (sched_feat(ALT_PERIOD))
0733         nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
0734 
0735     slice = __sched_period(nr_running + !se->on_rq);
0736 
0737     for_each_sched_entity(se) {
0738         struct load_weight *load;
0739         struct load_weight lw;
0740         struct cfs_rq *qcfs_rq;
0741 
0742         qcfs_rq = cfs_rq_of(se);
0743         load = &qcfs_rq->load;
0744 
0745         if (unlikely(!se->on_rq)) {
0746             lw = qcfs_rq->load;
0747 
0748             update_load_add(&lw, se->load.weight);
0749             load = &lw;
0750         }
0751         slice = __calc_delta(slice, se->load.weight, load);
0752     }
0753 
0754     if (sched_feat(BASE_SLICE)) {
0755         if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
0756             min_gran = sysctl_sched_idle_min_granularity;
0757         else
0758             min_gran = sysctl_sched_min_granularity;
0759 
0760         slice = max_t(u64, slice, min_gran);
0761     }
0762 
0763     return slice;
0764 }
0765 
0766 /*
0767  * We calculate the vruntime slice of a to-be-inserted task.
0768  *
0769  * vs = s/w
0770  */
0771 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
0772 {
0773     return calc_delta_fair(sched_slice(cfs_rq, se), se);
0774 }
0775 
0776 #include "pelt.h"
0777 #ifdef CONFIG_SMP
0778 
0779 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
0780 static unsigned long task_h_load(struct task_struct *p);
0781 static unsigned long capacity_of(int cpu);
0782 
0783 /* Give new sched_entity start runnable values to heavy its load in infant time */
0784 void init_entity_runnable_average(struct sched_entity *se)
0785 {
0786     struct sched_avg *sa = &se->avg;
0787 
0788     memset(sa, 0, sizeof(*sa));
0789 
0790     /*
0791      * Tasks are initialized with full load to be seen as heavy tasks until
0792      * they get a chance to stabilize to their real load level.
0793      * Group entities are initialized with zero load to reflect the fact that
0794      * nothing has been attached to the task group yet.
0795      */
0796     if (entity_is_task(se))
0797         sa->load_avg = scale_load_down(se->load.weight);
0798 
0799     /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
0800 }
0801 
0802 static void attach_entity_cfs_rq(struct sched_entity *se);
0803 
0804 /*
0805  * With new tasks being created, their initial util_avgs are extrapolated
0806  * based on the cfs_rq's current util_avg:
0807  *
0808  *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
0809  *
0810  * However, in many cases, the above util_avg does not give a desired
0811  * value. Moreover, the sum of the util_avgs may be divergent, such
0812  * as when the series is a harmonic series.
0813  *
0814  * To solve this problem, we also cap the util_avg of successive tasks to
0815  * only 1/2 of the left utilization budget:
0816  *
0817  *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
0818  *
0819  * where n denotes the nth task and cpu_scale the CPU capacity.
0820  *
0821  * For example, for a CPU with 1024 of capacity, a simplest series from
0822  * the beginning would be like:
0823  *
0824  *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
0825  * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
0826  *
0827  * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
0828  * if util_avg > util_avg_cap.
0829  */
0830 void post_init_entity_util_avg(struct task_struct *p)
0831 {
0832     struct sched_entity *se = &p->se;
0833     struct cfs_rq *cfs_rq = cfs_rq_of(se);
0834     struct sched_avg *sa = &se->avg;
0835     long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
0836     long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
0837 
0838     if (cap > 0) {
0839         if (cfs_rq->avg.util_avg != 0) {
0840             sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
0841             sa->util_avg /= (cfs_rq->avg.load_avg + 1);
0842 
0843             if (sa->util_avg > cap)
0844                 sa->util_avg = cap;
0845         } else {
0846             sa->util_avg = cap;
0847         }
0848     }
0849 
0850     sa->runnable_avg = sa->util_avg;
0851 
0852     if (p->sched_class != &fair_sched_class) {
0853         /*
0854          * For !fair tasks do:
0855          *
0856         update_cfs_rq_load_avg(now, cfs_rq);
0857         attach_entity_load_avg(cfs_rq, se);
0858         switched_from_fair(rq, p);
0859          *
0860          * such that the next switched_to_fair() has the
0861          * expected state.
0862          */
0863         se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
0864         return;
0865     }
0866 
0867     attach_entity_cfs_rq(se);
0868 }
0869 
0870 #else /* !CONFIG_SMP */
0871 void init_entity_runnable_average(struct sched_entity *se)
0872 {
0873 }
0874 void post_init_entity_util_avg(struct task_struct *p)
0875 {
0876 }
0877 static void update_tg_load_avg(struct cfs_rq *cfs_rq)
0878 {
0879 }
0880 #endif /* CONFIG_SMP */
0881 
0882 /*
0883  * Update the current task's runtime statistics.
0884  */
0885 static void update_curr(struct cfs_rq *cfs_rq)
0886 {
0887     struct sched_entity *curr = cfs_rq->curr;
0888     u64 now = rq_clock_task(rq_of(cfs_rq));
0889     u64 delta_exec;
0890 
0891     if (unlikely(!curr))
0892         return;
0893 
0894     delta_exec = now - curr->exec_start;
0895     if (unlikely((s64)delta_exec <= 0))
0896         return;
0897 
0898     curr->exec_start = now;
0899 
0900     if (schedstat_enabled()) {
0901         struct sched_statistics *stats;
0902 
0903         stats = __schedstats_from_se(curr);
0904         __schedstat_set(stats->exec_max,
0905                 max(delta_exec, stats->exec_max));
0906     }
0907 
0908     curr->sum_exec_runtime += delta_exec;
0909     schedstat_add(cfs_rq->exec_clock, delta_exec);
0910 
0911     curr->vruntime += calc_delta_fair(delta_exec, curr);
0912     update_min_vruntime(cfs_rq);
0913 
0914     if (entity_is_task(curr)) {
0915         struct task_struct *curtask = task_of(curr);
0916 
0917         trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
0918         cgroup_account_cputime(curtask, delta_exec);
0919         account_group_exec_runtime(curtask, delta_exec);
0920     }
0921 
0922     account_cfs_rq_runtime(cfs_rq, delta_exec);
0923 }
0924 
0925 static void update_curr_fair(struct rq *rq)
0926 {
0927     update_curr(cfs_rq_of(&rq->curr->se));
0928 }
0929 
0930 static inline void
0931 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
0932 {
0933     struct sched_statistics *stats;
0934     struct task_struct *p = NULL;
0935 
0936     if (!schedstat_enabled())
0937         return;
0938 
0939     stats = __schedstats_from_se(se);
0940 
0941     if (entity_is_task(se))
0942         p = task_of(se);
0943 
0944     __update_stats_wait_start(rq_of(cfs_rq), p, stats);
0945 }
0946 
0947 static inline void
0948 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
0949 {
0950     struct sched_statistics *stats;
0951     struct task_struct *p = NULL;
0952 
0953     if (!schedstat_enabled())
0954         return;
0955 
0956     stats = __schedstats_from_se(se);
0957 
0958     /*
0959      * When the sched_schedstat changes from 0 to 1, some sched se
0960      * maybe already in the runqueue, the se->statistics.wait_start
0961      * will be 0.So it will let the delta wrong. We need to avoid this
0962      * scenario.
0963      */
0964     if (unlikely(!schedstat_val(stats->wait_start)))
0965         return;
0966 
0967     if (entity_is_task(se))
0968         p = task_of(se);
0969 
0970     __update_stats_wait_end(rq_of(cfs_rq), p, stats);
0971 }
0972 
0973 static inline void
0974 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
0975 {
0976     struct sched_statistics *stats;
0977     struct task_struct *tsk = NULL;
0978 
0979     if (!schedstat_enabled())
0980         return;
0981 
0982     stats = __schedstats_from_se(se);
0983 
0984     if (entity_is_task(se))
0985         tsk = task_of(se);
0986 
0987     __update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats);
0988 }
0989 
0990 /*
0991  * Task is being enqueued - update stats:
0992  */
0993 static inline void
0994 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
0995 {
0996     if (!schedstat_enabled())
0997         return;
0998 
0999     /*
1000      * Are we enqueueing a waiting task? (for current tasks
1001      * a dequeue/enqueue event is a NOP)
1002      */
1003     if (se != cfs_rq->curr)
1004         update_stats_wait_start_fair(cfs_rq, se);
1005 
1006     if (flags & ENQUEUE_WAKEUP)
1007         update_stats_enqueue_sleeper_fair(cfs_rq, se);
1008 }
1009 
1010 static inline void
1011 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1012 {
1013 
1014     if (!schedstat_enabled())
1015         return;
1016 
1017     /*
1018      * Mark the end of the wait period if dequeueing a
1019      * waiting task:
1020      */
1021     if (se != cfs_rq->curr)
1022         update_stats_wait_end_fair(cfs_rq, se);
1023 
1024     if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1025         struct task_struct *tsk = task_of(se);
1026         unsigned int state;
1027 
1028         /* XXX racy against TTWU */
1029         state = READ_ONCE(tsk->__state);
1030         if (state & TASK_INTERRUPTIBLE)
1031             __schedstat_set(tsk->stats.sleep_start,
1032                       rq_clock(rq_of(cfs_rq)));
1033         if (state & TASK_UNINTERRUPTIBLE)
1034             __schedstat_set(tsk->stats.block_start,
1035                       rq_clock(rq_of(cfs_rq)));
1036     }
1037 }
1038 
1039 /*
1040  * We are picking a new current task - update its stats:
1041  */
1042 static inline void
1043 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1044 {
1045     /*
1046      * We are starting a new run period:
1047      */
1048     se->exec_start = rq_clock_task(rq_of(cfs_rq));
1049 }
1050 
1051 /**************************************************
1052  * Scheduling class queueing methods:
1053  */
1054 
1055 #ifdef CONFIG_NUMA
1056 #define NUMA_IMBALANCE_MIN 2
1057 
1058 static inline long
1059 adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
1060 {
1061     /*
1062      * Allow a NUMA imbalance if busy CPUs is less than the maximum
1063      * threshold. Above this threshold, individual tasks may be contending
1064      * for both memory bandwidth and any shared HT resources.  This is an
1065      * approximation as the number of running tasks may not be related to
1066      * the number of busy CPUs due to sched_setaffinity.
1067      */
1068     if (dst_running > imb_numa_nr)
1069         return imbalance;
1070 
1071     /*
1072      * Allow a small imbalance based on a simple pair of communicating
1073      * tasks that remain local when the destination is lightly loaded.
1074      */
1075     if (imbalance <= NUMA_IMBALANCE_MIN)
1076         return 0;
1077 
1078     return imbalance;
1079 }
1080 #endif /* CONFIG_NUMA */
1081 
1082 #ifdef CONFIG_NUMA_BALANCING
1083 /*
1084  * Approximate time to scan a full NUMA task in ms. The task scan period is
1085  * calculated based on the tasks virtual memory size and
1086  * numa_balancing_scan_size.
1087  */
1088 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1089 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
1090 
1091 /* Portion of address space to scan in MB */
1092 unsigned int sysctl_numa_balancing_scan_size = 256;
1093 
1094 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1095 unsigned int sysctl_numa_balancing_scan_delay = 1000;
1096 
1097 struct numa_group {
1098     refcount_t refcount;
1099 
1100     spinlock_t lock; /* nr_tasks, tasks */
1101     int nr_tasks;
1102     pid_t gid;
1103     int active_nodes;
1104 
1105     struct rcu_head rcu;
1106     unsigned long total_faults;
1107     unsigned long max_faults_cpu;
1108     /*
1109      * faults[] array is split into two regions: faults_mem and faults_cpu.
1110      *
1111      * Faults_cpu is used to decide whether memory should move
1112      * towards the CPU. As a consequence, these stats are weighted
1113      * more by CPU use than by memory faults.
1114      */
1115     unsigned long faults[];
1116 };
1117 
1118 /*
1119  * For functions that can be called in multiple contexts that permit reading
1120  * ->numa_group (see struct task_struct for locking rules).
1121  */
1122 static struct numa_group *deref_task_numa_group(struct task_struct *p)
1123 {
1124     return rcu_dereference_check(p->numa_group, p == current ||
1125         (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
1126 }
1127 
1128 static struct numa_group *deref_curr_numa_group(struct task_struct *p)
1129 {
1130     return rcu_dereference_protected(p->numa_group, p == current);
1131 }
1132 
1133 static inline unsigned long group_faults_priv(struct numa_group *ng);
1134 static inline unsigned long group_faults_shared(struct numa_group *ng);
1135 
1136 static unsigned int task_nr_scan_windows(struct task_struct *p)
1137 {
1138     unsigned long rss = 0;
1139     unsigned long nr_scan_pages;
1140 
1141     /*
1142      * Calculations based on RSS as non-present and empty pages are skipped
1143      * by the PTE scanner and NUMA hinting faults should be trapped based
1144      * on resident pages
1145      */
1146     nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1147     rss = get_mm_rss(p->mm);
1148     if (!rss)
1149         rss = nr_scan_pages;
1150 
1151     rss = round_up(rss, nr_scan_pages);
1152     return rss / nr_scan_pages;
1153 }
1154 
1155 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1156 #define MAX_SCAN_WINDOW 2560
1157 
1158 static unsigned int task_scan_min(struct task_struct *p)
1159 {
1160     unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
1161     unsigned int scan, floor;
1162     unsigned int windows = 1;
1163 
1164     if (scan_size < MAX_SCAN_WINDOW)
1165         windows = MAX_SCAN_WINDOW / scan_size;
1166     floor = 1000 / windows;
1167 
1168     scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1169     return max_t(unsigned int, floor, scan);
1170 }
1171 
1172 static unsigned int task_scan_start(struct task_struct *p)
1173 {
1174     unsigned long smin = task_scan_min(p);
1175     unsigned long period = smin;
1176     struct numa_group *ng;
1177 
1178     /* Scale the maximum scan period with the amount of shared memory. */
1179     rcu_read_lock();
1180     ng = rcu_dereference(p->numa_group);
1181     if (ng) {
1182         unsigned long shared = group_faults_shared(ng);
1183         unsigned long private = group_faults_priv(ng);
1184 
1185         period *= refcount_read(&ng->refcount);
1186         period *= shared + 1;
1187         period /= private + shared + 1;
1188     }
1189     rcu_read_unlock();
1190 
1191     return max(smin, period);
1192 }
1193 
1194 static unsigned int task_scan_max(struct task_struct *p)
1195 {
1196     unsigned long smin = task_scan_min(p);
1197     unsigned long smax;
1198     struct numa_group *ng;
1199 
1200     /* Watch for min being lower than max due to floor calculations */
1201     smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1202 
1203     /* Scale the maximum scan period with the amount of shared memory. */
1204     ng = deref_curr_numa_group(p);
1205     if (ng) {
1206         unsigned long shared = group_faults_shared(ng);
1207         unsigned long private = group_faults_priv(ng);
1208         unsigned long period = smax;
1209 
1210         period *= refcount_read(&ng->refcount);
1211         period *= shared + 1;
1212         period /= private + shared + 1;
1213 
1214         smax = max(smax, period);
1215     }
1216 
1217     return max(smin, smax);
1218 }
1219 
1220 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1221 {
1222     rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
1223     rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1224 }
1225 
1226 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1227 {
1228     rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
1229     rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1230 }
1231 
1232 /* Shared or private faults. */
1233 #define NR_NUMA_HINT_FAULT_TYPES 2
1234 
1235 /* Memory and CPU locality */
1236 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1237 
1238 /* Averaged statistics, and temporary buffers. */
1239 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1240 
1241 pid_t task_numa_group_id(struct task_struct *p)
1242 {
1243     struct numa_group *ng;
1244     pid_t gid = 0;
1245 
1246     rcu_read_lock();
1247     ng = rcu_dereference(p->numa_group);
1248     if (ng)
1249         gid = ng->gid;
1250     rcu_read_unlock();
1251 
1252     return gid;
1253 }
1254 
1255 /*
1256  * The averaged statistics, shared & private, memory & CPU,
1257  * occupy the first half of the array. The second half of the
1258  * array is for current counters, which are averaged into the
1259  * first set by task_numa_placement.
1260  */
1261 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1262 {
1263     return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1264 }
1265 
1266 static inline unsigned long task_faults(struct task_struct *p, int nid)
1267 {
1268     if (!p->numa_faults)
1269         return 0;
1270 
1271     return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1272         p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1273 }
1274 
1275 static inline unsigned long group_faults(struct task_struct *p, int nid)
1276 {
1277     struct numa_group *ng = deref_task_numa_group(p);
1278 
1279     if (!ng)
1280         return 0;
1281 
1282     return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1283         ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1284 }
1285 
1286 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1287 {
1288     return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
1289         group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
1290 }
1291 
1292 static inline unsigned long group_faults_priv(struct numa_group *ng)
1293 {
1294     unsigned long faults = 0;
1295     int node;
1296 
1297     for_each_online_node(node) {
1298         faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
1299     }
1300 
1301     return faults;
1302 }
1303 
1304 static inline unsigned long group_faults_shared(struct numa_group *ng)
1305 {
1306     unsigned long faults = 0;
1307     int node;
1308 
1309     for_each_online_node(node) {
1310         faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
1311     }
1312 
1313     return faults;
1314 }
1315 
1316 /*
1317  * A node triggering more than 1/3 as many NUMA faults as the maximum is
1318  * considered part of a numa group's pseudo-interleaving set. Migrations
1319  * between these nodes are slowed down, to allow things to settle down.
1320  */
1321 #define ACTIVE_NODE_FRACTION 3
1322 
1323 static bool numa_is_active_node(int nid, struct numa_group *ng)
1324 {
1325     return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1326 }
1327 
1328 /* Handle placement on systems where not all nodes are directly connected. */
1329 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1330                     int lim_dist, bool task)
1331 {
1332     unsigned long score = 0;
1333     int node, max_dist;
1334 
1335     /*
1336      * All nodes are directly connected, and the same distance
1337      * from each other. No need for fancy placement algorithms.
1338      */
1339     if (sched_numa_topology_type == NUMA_DIRECT)
1340         return 0;
1341 
1342     /* sched_max_numa_distance may be changed in parallel. */
1343     max_dist = READ_ONCE(sched_max_numa_distance);
1344     /*
1345      * This code is called for each node, introducing N^2 complexity,
1346      * which should be ok given the number of nodes rarely exceeds 8.
1347      */
1348     for_each_online_node(node) {
1349         unsigned long faults;
1350         int dist = node_distance(nid, node);
1351 
1352         /*
1353          * The furthest away nodes in the system are not interesting
1354          * for placement; nid was already counted.
1355          */
1356         if (dist >= max_dist || node == nid)
1357             continue;
1358 
1359         /*
1360          * On systems with a backplane NUMA topology, compare groups
1361          * of nodes, and move tasks towards the group with the most
1362          * memory accesses. When comparing two nodes at distance
1363          * "hoplimit", only nodes closer by than "hoplimit" are part
1364          * of each group. Skip other nodes.
1365          */
1366         if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= lim_dist)
1367             continue;
1368 
1369         /* Add up the faults from nearby nodes. */
1370         if (task)
1371             faults = task_faults(p, node);
1372         else
1373             faults = group_faults(p, node);
1374 
1375         /*
1376          * On systems with a glueless mesh NUMA topology, there are
1377          * no fixed "groups of nodes". Instead, nodes that are not
1378          * directly connected bounce traffic through intermediate
1379          * nodes; a numa_group can occupy any set of nodes.
1380          * The further away a node is, the less the faults count.
1381          * This seems to result in good task placement.
1382          */
1383         if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1384             faults *= (max_dist - dist);
1385             faults /= (max_dist - LOCAL_DISTANCE);
1386         }
1387 
1388         score += faults;
1389     }
1390 
1391     return score;
1392 }
1393 
1394 /*
1395  * These return the fraction of accesses done by a particular task, or
1396  * task group, on a particular numa node.  The group weight is given a
1397  * larger multiplier, in order to group tasks together that are almost
1398  * evenly spread out between numa nodes.
1399  */
1400 static inline unsigned long task_weight(struct task_struct *p, int nid,
1401                     int dist)
1402 {
1403     unsigned long faults, total_faults;
1404 
1405     if (!p->numa_faults)
1406         return 0;
1407 
1408     total_faults = p->total_numa_faults;
1409 
1410     if (!total_faults)
1411         return 0;
1412 
1413     faults = task_faults(p, nid);
1414     faults += score_nearby_nodes(p, nid, dist, true);
1415 
1416     return 1000 * faults / total_faults;
1417 }
1418 
1419 static inline unsigned long group_weight(struct task_struct *p, int nid,
1420                      int dist)
1421 {
1422     struct numa_group *ng = deref_task_numa_group(p);
1423     unsigned long faults, total_faults;
1424 
1425     if (!ng)
1426         return 0;
1427 
1428     total_faults = ng->total_faults;
1429 
1430     if (!total_faults)
1431         return 0;
1432 
1433     faults = group_faults(p, nid);
1434     faults += score_nearby_nodes(p, nid, dist, false);
1435 
1436     return 1000 * faults / total_faults;
1437 }
1438 
1439 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1440                 int src_nid, int dst_cpu)
1441 {
1442     struct numa_group *ng = deref_curr_numa_group(p);
1443     int dst_nid = cpu_to_node(dst_cpu);
1444     int last_cpupid, this_cpupid;
1445 
1446     this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1447     last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1448 
1449     /*
1450      * Allow first faults or private faults to migrate immediately early in
1451      * the lifetime of a task. The magic number 4 is based on waiting for
1452      * two full passes of the "multi-stage node selection" test that is
1453      * executed below.
1454      */
1455     if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
1456         (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
1457         return true;
1458 
1459     /*
1460      * Multi-stage node selection is used in conjunction with a periodic
1461      * migration fault to build a temporal task<->page relation. By using
1462      * a two-stage filter we remove short/unlikely relations.
1463      *
1464      * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1465      * a task's usage of a particular page (n_p) per total usage of this
1466      * page (n_t) (in a given time-span) to a probability.
1467      *
1468      * Our periodic faults will sample this probability and getting the
1469      * same result twice in a row, given these samples are fully
1470      * independent, is then given by P(n)^2, provided our sample period
1471      * is sufficiently short compared to the usage pattern.
1472      *
1473      * This quadric squishes small probabilities, making it less likely we
1474      * act on an unlikely task<->page relation.
1475      */
1476     if (!cpupid_pid_unset(last_cpupid) &&
1477                 cpupid_to_nid(last_cpupid) != dst_nid)
1478         return false;
1479 
1480     /* Always allow migrate on private faults */
1481     if (cpupid_match_pid(p, last_cpupid))
1482         return true;
1483 
1484     /* A shared fault, but p->numa_group has not been set up yet. */
1485     if (!ng)
1486         return true;
1487 
1488     /*
1489      * Destination node is much more heavily used than the source
1490      * node? Allow migration.
1491      */
1492     if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1493                     ACTIVE_NODE_FRACTION)
1494         return true;
1495 
1496     /*
1497      * Distribute memory according to CPU & memory use on each node,
1498      * with 3/4 hysteresis to avoid unnecessary memory migrations:
1499      *
1500      * faults_cpu(dst)   3   faults_cpu(src)
1501      * --------------- * - > ---------------
1502      * faults_mem(dst)   4   faults_mem(src)
1503      */
1504     return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1505            group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1506 }
1507 
1508 /*
1509  * 'numa_type' describes the node at the moment of load balancing.
1510  */
1511 enum numa_type {
1512     /* The node has spare capacity that can be used to run more tasks.  */
1513     node_has_spare = 0,
1514     /*
1515      * The node is fully used and the tasks don't compete for more CPU
1516      * cycles. Nevertheless, some tasks might wait before running.
1517      */
1518     node_fully_busy,
1519     /*
1520      * The node is overloaded and can't provide expected CPU cycles to all
1521      * tasks.
1522      */
1523     node_overloaded
1524 };
1525 
1526 /* Cached statistics for all CPUs within a node */
1527 struct numa_stats {
1528     unsigned long load;
1529     unsigned long runnable;
1530     unsigned long util;
1531     /* Total compute capacity of CPUs on a node */
1532     unsigned long compute_capacity;
1533     unsigned int nr_running;
1534     unsigned int weight;
1535     enum numa_type node_type;
1536     int idle_cpu;
1537 };
1538 
1539 static inline bool is_core_idle(int cpu)
1540 {
1541 #ifdef CONFIG_SCHED_SMT
1542     int sibling;
1543 
1544     for_each_cpu(sibling, cpu_smt_mask(cpu)) {
1545         if (cpu == sibling)
1546             continue;
1547 
1548         if (!idle_cpu(sibling))
1549             return false;
1550     }
1551 #endif
1552 
1553     return true;
1554 }
1555 
1556 struct task_numa_env {
1557     struct task_struct *p;
1558 
1559     int src_cpu, src_nid;
1560     int dst_cpu, dst_nid;
1561     int imb_numa_nr;
1562 
1563     struct numa_stats src_stats, dst_stats;
1564 
1565     int imbalance_pct;
1566     int dist;
1567 
1568     struct task_struct *best_task;
1569     long best_imp;
1570     int best_cpu;
1571 };
1572 
1573 static unsigned long cpu_load(struct rq *rq);
1574 static unsigned long cpu_runnable(struct rq *rq);
1575 
1576 static inline enum
1577 numa_type numa_classify(unsigned int imbalance_pct,
1578              struct numa_stats *ns)
1579 {
1580     if ((ns->nr_running > ns->weight) &&
1581         (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
1582          ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
1583         return node_overloaded;
1584 
1585     if ((ns->nr_running < ns->weight) ||
1586         (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
1587          ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
1588         return node_has_spare;
1589 
1590     return node_fully_busy;
1591 }
1592 
1593 #ifdef CONFIG_SCHED_SMT
1594 /* Forward declarations of select_idle_sibling helpers */
1595 static inline bool test_idle_cores(int cpu, bool def);
1596 static inline int numa_idle_core(int idle_core, int cpu)
1597 {
1598     if (!static_branch_likely(&sched_smt_present) ||
1599         idle_core >= 0 || !test_idle_cores(cpu, false))
1600         return idle_core;
1601 
1602     /*
1603      * Prefer cores instead of packing HT siblings
1604      * and triggering future load balancing.
1605      */
1606     if (is_core_idle(cpu))
1607         idle_core = cpu;
1608 
1609     return idle_core;
1610 }
1611 #else
1612 static inline int numa_idle_core(int idle_core, int cpu)
1613 {
1614     return idle_core;
1615 }
1616 #endif
1617 
1618 /*
1619  * Gather all necessary information to make NUMA balancing placement
1620  * decisions that are compatible with standard load balancer. This
1621  * borrows code and logic from update_sg_lb_stats but sharing a
1622  * common implementation is impractical.
1623  */
1624 static void update_numa_stats(struct task_numa_env *env,
1625                   struct numa_stats *ns, int nid,
1626                   bool find_idle)
1627 {
1628     int cpu, idle_core = -1;
1629 
1630     memset(ns, 0, sizeof(*ns));
1631     ns->idle_cpu = -1;
1632 
1633     rcu_read_lock();
1634     for_each_cpu(cpu, cpumask_of_node(nid)) {
1635         struct rq *rq = cpu_rq(cpu);
1636 
1637         ns->load += cpu_load(rq);
1638         ns->runnable += cpu_runnable(rq);
1639         ns->util += cpu_util_cfs(cpu);
1640         ns->nr_running += rq->cfs.h_nr_running;
1641         ns->compute_capacity += capacity_of(cpu);
1642 
1643         if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
1644             if (READ_ONCE(rq->numa_migrate_on) ||
1645                 !cpumask_test_cpu(cpu, env->p->cpus_ptr))
1646                 continue;
1647 
1648             if (ns->idle_cpu == -1)
1649                 ns->idle_cpu = cpu;
1650 
1651             idle_core = numa_idle_core(idle_core, cpu);
1652         }
1653     }
1654     rcu_read_unlock();
1655 
1656     ns->weight = cpumask_weight(cpumask_of_node(nid));
1657 
1658     ns->node_type = numa_classify(env->imbalance_pct, ns);
1659 
1660     if (idle_core >= 0)
1661         ns->idle_cpu = idle_core;
1662 }
1663 
1664 static void task_numa_assign(struct task_numa_env *env,
1665                  struct task_struct *p, long imp)
1666 {
1667     struct rq *rq = cpu_rq(env->dst_cpu);
1668 
1669     /* Check if run-queue part of active NUMA balance. */
1670     if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
1671         int cpu;
1672         int start = env->dst_cpu;
1673 
1674         /* Find alternative idle CPU. */
1675         for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
1676             if (cpu == env->best_cpu || !idle_cpu(cpu) ||
1677                 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
1678                 continue;
1679             }
1680 
1681             env->dst_cpu = cpu;
1682             rq = cpu_rq(env->dst_cpu);
1683             if (!xchg(&rq->numa_migrate_on, 1))
1684                 goto assign;
1685         }
1686 
1687         /* Failed to find an alternative idle CPU */
1688         return;
1689     }
1690 
1691 assign:
1692     /*
1693      * Clear previous best_cpu/rq numa-migrate flag, since task now
1694      * found a better CPU to move/swap.
1695      */
1696     if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
1697         rq = cpu_rq(env->best_cpu);
1698         WRITE_ONCE(rq->numa_migrate_on, 0);
1699     }
1700 
1701     if (env->best_task)
1702         put_task_struct(env->best_task);
1703     if (p)
1704         get_task_struct(p);
1705 
1706     env->best_task = p;
1707     env->best_imp = imp;
1708     env->best_cpu = env->dst_cpu;
1709 }
1710 
1711 static bool load_too_imbalanced(long src_load, long dst_load,
1712                 struct task_numa_env *env)
1713 {
1714     long imb, old_imb;
1715     long orig_src_load, orig_dst_load;
1716     long src_capacity, dst_capacity;
1717 
1718     /*
1719      * The load is corrected for the CPU capacity available on each node.
1720      *
1721      * src_load        dst_load
1722      * ------------ vs ---------
1723      * src_capacity    dst_capacity
1724      */
1725     src_capacity = env->src_stats.compute_capacity;
1726     dst_capacity = env->dst_stats.compute_capacity;
1727 
1728     imb = abs(dst_load * src_capacity - src_load * dst_capacity);
1729 
1730     orig_src_load = env->src_stats.load;
1731     orig_dst_load = env->dst_stats.load;
1732 
1733     old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
1734 
1735     /* Would this change make things worse? */
1736     return (imb > old_imb);
1737 }
1738 
1739 /*
1740  * Maximum NUMA importance can be 1998 (2*999);
1741  * SMALLIMP @ 30 would be close to 1998/64.
1742  * Used to deter task migration.
1743  */
1744 #define SMALLIMP    30
1745 
1746 /*
1747  * This checks if the overall compute and NUMA accesses of the system would
1748  * be improved if the source tasks was migrated to the target dst_cpu taking
1749  * into account that it might be best if task running on the dst_cpu should
1750  * be exchanged with the source task
1751  */
1752 static bool task_numa_compare(struct task_numa_env *env,
1753                   long taskimp, long groupimp, bool maymove)
1754 {
1755     struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
1756     struct rq *dst_rq = cpu_rq(env->dst_cpu);
1757     long imp = p_ng ? groupimp : taskimp;
1758     struct task_struct *cur;
1759     long src_load, dst_load;
1760     int dist = env->dist;
1761     long moveimp = imp;
1762     long load;
1763     bool stopsearch = false;
1764 
1765     if (READ_ONCE(dst_rq->numa_migrate_on))
1766         return false;
1767 
1768     rcu_read_lock();
1769     cur = rcu_dereference(dst_rq->curr);
1770     if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
1771         cur = NULL;
1772 
1773     /*
1774      * Because we have preemption enabled we can get migrated around and
1775      * end try selecting ourselves (current == env->p) as a swap candidate.
1776      */
1777     if (cur == env->p) {
1778         stopsearch = true;
1779         goto unlock;
1780     }
1781 
1782     if (!cur) {
1783         if (maymove && moveimp >= env->best_imp)
1784             goto assign;
1785         else
1786             goto unlock;
1787     }
1788 
1789     /* Skip this swap candidate if cannot move to the source cpu. */
1790     if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
1791         goto unlock;
1792 
1793     /*
1794      * Skip this swap candidate if it is not moving to its preferred
1795      * node and the best task is.
1796      */
1797     if (env->best_task &&
1798         env->best_task->numa_preferred_nid == env->src_nid &&
1799         cur->numa_preferred_nid != env->src_nid) {
1800         goto unlock;
1801     }
1802 
1803     /*
1804      * "imp" is the fault differential for the source task between the
1805      * source and destination node. Calculate the total differential for
1806      * the source task and potential destination task. The more negative
1807      * the value is, the more remote accesses that would be expected to
1808      * be incurred if the tasks were swapped.
1809      *
1810      * If dst and source tasks are in the same NUMA group, or not
1811      * in any group then look only at task weights.
1812      */
1813     cur_ng = rcu_dereference(cur->numa_group);
1814     if (cur_ng == p_ng) {
1815         /*
1816          * Do not swap within a group or between tasks that have
1817          * no group if there is spare capacity. Swapping does
1818          * not address the load imbalance and helps one task at
1819          * the cost of punishing another.
1820          */
1821         if (env->dst_stats.node_type == node_has_spare)
1822             goto unlock;
1823 
1824         imp = taskimp + task_weight(cur, env->src_nid, dist) -
1825               task_weight(cur, env->dst_nid, dist);
1826         /*
1827          * Add some hysteresis to prevent swapping the
1828          * tasks within a group over tiny differences.
1829          */
1830         if (cur_ng)
1831             imp -= imp / 16;
1832     } else {
1833         /*
1834          * Compare the group weights. If a task is all by itself
1835          * (not part of a group), use the task weight instead.
1836          */
1837         if (cur_ng && p_ng)
1838             imp += group_weight(cur, env->src_nid, dist) -
1839                    group_weight(cur, env->dst_nid, dist);
1840         else
1841             imp += task_weight(cur, env->src_nid, dist) -
1842                    task_weight(cur, env->dst_nid, dist);
1843     }
1844 
1845     /* Discourage picking a task already on its preferred node */
1846     if (cur->numa_preferred_nid == env->dst_nid)
1847         imp -= imp / 16;
1848 
1849     /*
1850      * Encourage picking a task that moves to its preferred node.
1851      * This potentially makes imp larger than it's maximum of
1852      * 1998 (see SMALLIMP and task_weight for why) but in this
1853      * case, it does not matter.
1854      */
1855     if (cur->numa_preferred_nid == env->src_nid)
1856         imp += imp / 8;
1857 
1858     if (maymove && moveimp > imp && moveimp > env->best_imp) {
1859         imp = moveimp;
1860         cur = NULL;
1861         goto assign;
1862     }
1863 
1864     /*
1865      * Prefer swapping with a task moving to its preferred node over a
1866      * task that is not.
1867      */
1868     if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
1869         env->best_task->numa_preferred_nid != env->src_nid) {
1870         goto assign;
1871     }
1872 
1873     /*
1874      * If the NUMA importance is less than SMALLIMP,
1875      * task migration might only result in ping pong
1876      * of tasks and also hurt performance due to cache
1877      * misses.
1878      */
1879     if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
1880         goto unlock;
1881 
1882     /*
1883      * In the overloaded case, try and keep the load balanced.
1884      */
1885     load = task_h_load(env->p) - task_h_load(cur);
1886     if (!load)
1887         goto assign;
1888 
1889     dst_load = env->dst_stats.load + load;
1890     src_load = env->src_stats.load - load;
1891 
1892     if (load_too_imbalanced(src_load, dst_load, env))
1893         goto unlock;
1894 
1895 assign:
1896     /* Evaluate an idle CPU for a task numa move. */
1897     if (!cur) {
1898         int cpu = env->dst_stats.idle_cpu;
1899 
1900         /* Nothing cached so current CPU went idle since the search. */
1901         if (cpu < 0)
1902             cpu = env->dst_cpu;
1903 
1904         /*
1905          * If the CPU is no longer truly idle and the previous best CPU
1906          * is, keep using it.
1907          */
1908         if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
1909             idle_cpu(env->best_cpu)) {
1910             cpu = env->best_cpu;
1911         }
1912 
1913         env->dst_cpu = cpu;
1914     }
1915 
1916     task_numa_assign(env, cur, imp);
1917 
1918     /*
1919      * If a move to idle is allowed because there is capacity or load
1920      * balance improves then stop the search. While a better swap
1921      * candidate may exist, a search is not free.
1922      */
1923     if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
1924         stopsearch = true;
1925 
1926     /*
1927      * If a swap candidate must be identified and the current best task
1928      * moves its preferred node then stop the search.
1929      */
1930     if (!maymove && env->best_task &&
1931         env->best_task->numa_preferred_nid == env->src_nid) {
1932         stopsearch = true;
1933     }
1934 unlock:
1935     rcu_read_unlock();
1936 
1937     return stopsearch;
1938 }
1939 
1940 static void task_numa_find_cpu(struct task_numa_env *env,
1941                 long taskimp, long groupimp)
1942 {
1943     bool maymove = false;
1944     int cpu;
1945 
1946     /*
1947      * If dst node has spare capacity, then check if there is an
1948      * imbalance that would be overruled by the load balancer.
1949      */
1950     if (env->dst_stats.node_type == node_has_spare) {
1951         unsigned int imbalance;
1952         int src_running, dst_running;
1953 
1954         /*
1955          * Would movement cause an imbalance? Note that if src has
1956          * more running tasks that the imbalance is ignored as the
1957          * move improves the imbalance from the perspective of the
1958          * CPU load balancer.
1959          * */
1960         src_running = env->src_stats.nr_running - 1;
1961         dst_running = env->dst_stats.nr_running + 1;
1962         imbalance = max(0, dst_running - src_running);
1963         imbalance = adjust_numa_imbalance(imbalance, dst_running,
1964                           env->imb_numa_nr);
1965 
1966         /* Use idle CPU if there is no imbalance */
1967         if (!imbalance) {
1968             maymove = true;
1969             if (env->dst_stats.idle_cpu >= 0) {
1970                 env->dst_cpu = env->dst_stats.idle_cpu;
1971                 task_numa_assign(env, NULL, 0);
1972                 return;
1973             }
1974         }
1975     } else {
1976         long src_load, dst_load, load;
1977         /*
1978          * If the improvement from just moving env->p direction is better
1979          * than swapping tasks around, check if a move is possible.
1980          */
1981         load = task_h_load(env->p);
1982         dst_load = env->dst_stats.load + load;
1983         src_load = env->src_stats.load - load;
1984         maymove = !load_too_imbalanced(src_load, dst_load, env);
1985     }
1986 
1987     for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1988         /* Skip this CPU if the source task cannot migrate */
1989         if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
1990             continue;
1991 
1992         env->dst_cpu = cpu;
1993         if (task_numa_compare(env, taskimp, groupimp, maymove))
1994             break;
1995     }
1996 }
1997 
1998 static int task_numa_migrate(struct task_struct *p)
1999 {
2000     struct task_numa_env env = {
2001         .p = p,
2002 
2003         .src_cpu = task_cpu(p),
2004         .src_nid = task_node(p),
2005 
2006         .imbalance_pct = 112,
2007 
2008         .best_task = NULL,
2009         .best_imp = 0,
2010         .best_cpu = -1,
2011     };
2012     unsigned long taskweight, groupweight;
2013     struct sched_domain *sd;
2014     long taskimp, groupimp;
2015     struct numa_group *ng;
2016     struct rq *best_rq;
2017     int nid, ret, dist;
2018 
2019     /*
2020      * Pick the lowest SD_NUMA domain, as that would have the smallest
2021      * imbalance and would be the first to start moving tasks about.
2022      *
2023      * And we want to avoid any moving of tasks about, as that would create
2024      * random movement of tasks -- counter the numa conditions we're trying
2025      * to satisfy here.
2026      */
2027     rcu_read_lock();
2028     sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
2029     if (sd) {
2030         env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
2031         env.imb_numa_nr = sd->imb_numa_nr;
2032     }
2033     rcu_read_unlock();
2034 
2035     /*
2036      * Cpusets can break the scheduler domain tree into smaller
2037      * balance domains, some of which do not cross NUMA boundaries.
2038      * Tasks that are "trapped" in such domains cannot be migrated
2039      * elsewhere, so there is no point in (re)trying.
2040      */
2041     if (unlikely(!sd)) {
2042         sched_setnuma(p, task_node(p));
2043         return -EINVAL;
2044     }
2045 
2046     env.dst_nid = p->numa_preferred_nid;
2047     dist = env.dist = node_distance(env.src_nid, env.dst_nid);
2048     taskweight = task_weight(p, env.src_nid, dist);
2049     groupweight = group_weight(p, env.src_nid, dist);
2050     update_numa_stats(&env, &env.src_stats, env.src_nid, false);
2051     taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
2052     groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
2053     update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2054 
2055     /* Try to find a spot on the preferred nid. */
2056     task_numa_find_cpu(&env, taskimp, groupimp);
2057 
2058     /*
2059      * Look at other nodes in these cases:
2060      * - there is no space available on the preferred_nid
2061      * - the task is part of a numa_group that is interleaved across
2062      *   multiple NUMA nodes; in order to better consolidate the group,
2063      *   we need to check other locations.
2064      */
2065     ng = deref_curr_numa_group(p);
2066     if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
2067         for_each_node_state(nid, N_CPU) {
2068             if (nid == env.src_nid || nid == p->numa_preferred_nid)
2069                 continue;
2070 
2071             dist = node_distance(env.src_nid, env.dst_nid);
2072             if (sched_numa_topology_type == NUMA_BACKPLANE &&
2073                         dist != env.dist) {
2074                 taskweight = task_weight(p, env.src_nid, dist);
2075                 groupweight = group_weight(p, env.src_nid, dist);
2076             }
2077 
2078             /* Only consider nodes where both task and groups benefit */
2079             taskimp = task_weight(p, nid, dist) - taskweight;
2080             groupimp = group_weight(p, nid, dist) - groupweight;
2081             if (taskimp < 0 && groupimp < 0)
2082                 continue;
2083 
2084             env.dist = dist;
2085             env.dst_nid = nid;
2086             update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2087             task_numa_find_cpu(&env, taskimp, groupimp);
2088         }
2089     }
2090 
2091     /*
2092      * If the task is part of a workload that spans multiple NUMA nodes,
2093      * and is migrating into one of the workload's active nodes, remember
2094      * this node as the task's preferred numa node, so the workload can
2095      * settle down.
2096      * A task that migrated to a second choice node will be better off
2097      * trying for a better one later. Do not set the preferred node here.
2098      */
2099     if (ng) {
2100         if (env.best_cpu == -1)
2101             nid = env.src_nid;
2102         else
2103             nid = cpu_to_node(env.best_cpu);
2104 
2105         if (nid != p->numa_preferred_nid)
2106             sched_setnuma(p, nid);
2107     }
2108 
2109     /* No better CPU than the current one was found. */
2110     if (env.best_cpu == -1) {
2111         trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
2112         return -EAGAIN;
2113     }
2114 
2115     best_rq = cpu_rq(env.best_cpu);
2116     if (env.best_task == NULL) {
2117         ret = migrate_task_to(p, env.best_cpu);
2118         WRITE_ONCE(best_rq->numa_migrate_on, 0);
2119         if (ret != 0)
2120             trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
2121         return ret;
2122     }
2123 
2124     ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
2125     WRITE_ONCE(best_rq->numa_migrate_on, 0);
2126 
2127     if (ret != 0)
2128         trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
2129     put_task_struct(env.best_task);
2130     return ret;
2131 }
2132 
2133 /* Attempt to migrate a task to a CPU on the preferred node. */
2134 static void numa_migrate_preferred(struct task_struct *p)
2135 {
2136     unsigned long interval = HZ;
2137 
2138     /* This task has no NUMA fault statistics yet */
2139     if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
2140         return;
2141 
2142     /* Periodically retry migrating the task to the preferred node */
2143     interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
2144     p->numa_migrate_retry = jiffies + interval;
2145 
2146     /* Success if task is already running on preferred CPU */
2147     if (task_node(p) == p->numa_preferred_nid)
2148         return;
2149 
2150     /* Otherwise, try migrate to a CPU on the preferred node */
2151     task_numa_migrate(p);
2152 }
2153 
2154 /*
2155  * Find out how many nodes the workload is actively running on. Do this by
2156  * tracking the nodes from which NUMA hinting faults are triggered. This can
2157  * be different from the set of nodes where the workload's memory is currently
2158  * located.
2159  */
2160 static void numa_group_count_active_nodes(struct numa_group *numa_group)
2161 {
2162     unsigned long faults, max_faults = 0;
2163     int nid, active_nodes = 0;
2164 
2165     for_each_node_state(nid, N_CPU) {
2166         faults = group_faults_cpu(numa_group, nid);
2167         if (faults > max_faults)
2168             max_faults = faults;
2169     }
2170 
2171     for_each_node_state(nid, N_CPU) {
2172         faults = group_faults_cpu(numa_group, nid);
2173         if (faults * ACTIVE_NODE_FRACTION > max_faults)
2174             active_nodes++;
2175     }
2176 
2177     numa_group->max_faults_cpu = max_faults;
2178     numa_group->active_nodes = active_nodes;
2179 }
2180 
2181 /*
2182  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
2183  * increments. The more local the fault statistics are, the higher the scan
2184  * period will be for the next scan window. If local/(local+remote) ratio is
2185  * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
2186  * the scan period will decrease. Aim for 70% local accesses.
2187  */
2188 #define NUMA_PERIOD_SLOTS 10
2189 #define NUMA_PERIOD_THRESHOLD 7
2190 
2191 /*
2192  * Increase the scan period (slow down scanning) if the majority of
2193  * our memory is already on our local node, or if the majority of
2194  * the page accesses are shared with other processes.
2195  * Otherwise, decrease the scan period.
2196  */
2197 static void update_task_scan_period(struct task_struct *p,
2198             unsigned long shared, unsigned long private)
2199 {
2200     unsigned int period_slot;
2201     int lr_ratio, ps_ratio;
2202     int diff;
2203 
2204     unsigned long remote = p->numa_faults_locality[0];
2205     unsigned long local = p->numa_faults_locality[1];
2206 
2207     /*
2208      * If there were no record hinting faults then either the task is
2209      * completely idle or all activity is in areas that are not of interest
2210      * to automatic numa balancing. Related to that, if there were failed
2211      * migration then it implies we are migrating too quickly or the local
2212      * node is overloaded. In either case, scan slower
2213      */
2214     if (local + shared == 0 || p->numa_faults_locality[2]) {
2215         p->numa_scan_period = min(p->numa_scan_period_max,
2216             p->numa_scan_period << 1);
2217 
2218         p->mm->numa_next_scan = jiffies +
2219             msecs_to_jiffies(p->numa_scan_period);
2220 
2221         return;
2222     }
2223 
2224     /*
2225      * Prepare to scale scan period relative to the current period.
2226      *   == NUMA_PERIOD_THRESHOLD scan period stays the same
2227      *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
2228      *   >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
2229      */
2230     period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
2231     lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
2232     ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
2233 
2234     if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
2235         /*
2236          * Most memory accesses are local. There is no need to
2237          * do fast NUMA scanning, since memory is already local.
2238          */
2239         int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
2240         if (!slot)
2241             slot = 1;
2242         diff = slot * period_slot;
2243     } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
2244         /*
2245          * Most memory accesses are shared with other tasks.
2246          * There is no point in continuing fast NUMA scanning,
2247          * since other tasks may just move the memory elsewhere.
2248          */
2249         int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
2250         if (!slot)
2251             slot = 1;
2252         diff = slot * period_slot;
2253     } else {
2254         /*
2255          * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
2256          * yet they are not on the local NUMA node. Speed up
2257          * NUMA scanning to get the memory moved over.
2258          */
2259         int ratio = max(lr_ratio, ps_ratio);
2260         diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
2261     }
2262 
2263     p->numa_scan_period = clamp(p->numa_scan_period + diff,
2264             task_scan_min(p), task_scan_max(p));
2265     memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2266 }
2267 
2268 /*
2269  * Get the fraction of time the task has been running since the last
2270  * NUMA placement cycle. The scheduler keeps similar statistics, but
2271  * decays those on a 32ms period, which is orders of magnitude off
2272  * from the dozens-of-seconds NUMA balancing period. Use the scheduler
2273  * stats only if the task is so new there are no NUMA statistics yet.
2274  */
2275 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2276 {
2277     u64 runtime, delta, now;
2278     /* Use the start of this time slice to avoid calculations. */
2279     now = p->se.exec_start;
2280     runtime = p->se.sum_exec_runtime;
2281 
2282     if (p->last_task_numa_placement) {
2283         delta = runtime - p->last_sum_exec_runtime;
2284         *period = now - p->last_task_numa_placement;
2285 
2286         /* Avoid time going backwards, prevent potential divide error: */
2287         if (unlikely((s64)*period < 0))
2288             *period = 0;
2289     } else {
2290         delta = p->se.avg.load_sum;
2291         *period = LOAD_AVG_MAX;
2292     }
2293 
2294     p->last_sum_exec_runtime = runtime;
2295     p->last_task_numa_placement = now;
2296 
2297     return delta;
2298 }
2299 
2300 /*
2301  * Determine the preferred nid for a task in a numa_group. This needs to
2302  * be done in a way that produces consistent results with group_weight,
2303  * otherwise workloads might not converge.
2304  */
2305 static int preferred_group_nid(struct task_struct *p, int nid)
2306 {
2307     nodemask_t nodes;
2308     int dist;
2309 
2310     /* Direct connections between all NUMA nodes. */
2311     if (sched_numa_topology_type == NUMA_DIRECT)
2312         return nid;
2313 
2314     /*
2315      * On a system with glueless mesh NUMA topology, group_weight
2316      * scores nodes according to the number of NUMA hinting faults on
2317      * both the node itself, and on nearby nodes.
2318      */
2319     if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2320         unsigned long score, max_score = 0;
2321         int node, max_node = nid;
2322 
2323         dist = sched_max_numa_distance;
2324 
2325         for_each_node_state(node, N_CPU) {
2326             score = group_weight(p, node, dist);
2327             if (score > max_score) {
2328                 max_score = score;
2329                 max_node = node;
2330             }
2331         }
2332         return max_node;
2333     }
2334 
2335     /*
2336      * Finding the preferred nid in a system with NUMA backplane
2337      * interconnect topology is more involved. The goal is to locate
2338      * tasks from numa_groups near each other in the system, and
2339      * untangle workloads from different sides of the system. This requires
2340      * searching down the hierarchy of node groups, recursively searching
2341      * inside the highest scoring group of nodes. The nodemask tricks
2342      * keep the complexity of the search down.
2343      */
2344     nodes = node_states[N_CPU];
2345     for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2346         unsigned long max_faults = 0;
2347         nodemask_t max_group = NODE_MASK_NONE;
2348         int a, b;
2349 
2350         /* Are there nodes at this distance from each other? */
2351         if (!find_numa_distance(dist))
2352             continue;
2353 
2354         for_each_node_mask(a, nodes) {
2355             unsigned long faults = 0;
2356             nodemask_t this_group;
2357             nodes_clear(this_group);
2358 
2359             /* Sum group's NUMA faults; includes a==b case. */
2360             for_each_node_mask(b, nodes) {
2361                 if (node_distance(a, b) < dist) {
2362                     faults += group_faults(p, b);
2363                     node_set(b, this_group);
2364                     node_clear(b, nodes);
2365                 }
2366             }
2367 
2368             /* Remember the top group. */
2369             if (faults > max_faults) {
2370                 max_faults = faults;
2371                 max_group = this_group;
2372                 /*
2373                  * subtle: at the smallest distance there is
2374                  * just one node left in each "group", the
2375                  * winner is the preferred nid.
2376                  */
2377                 nid = a;
2378             }
2379         }
2380         /* Next round, evaluate the nodes within max_group. */
2381         if (!max_faults)
2382             break;
2383         nodes = max_group;
2384     }
2385     return nid;
2386 }
2387 
2388 static void task_numa_placement(struct task_struct *p)
2389 {
2390     int seq, nid, max_nid = NUMA_NO_NODE;
2391     unsigned long max_faults = 0;
2392     unsigned long fault_types[2] = { 0, 0 };
2393     unsigned long total_faults;
2394     u64 runtime, period;
2395     spinlock_t *group_lock = NULL;
2396     struct numa_group *ng;
2397 
2398     /*
2399      * The p->mm->numa_scan_seq field gets updated without
2400      * exclusive access. Use READ_ONCE() here to ensure
2401      * that the field is read in a single access:
2402      */
2403     seq = READ_ONCE(p->mm->numa_scan_seq);
2404     if (p->numa_scan_seq == seq)
2405         return;
2406     p->numa_scan_seq = seq;
2407     p->numa_scan_period_max = task_scan_max(p);
2408 
2409     total_faults = p->numa_faults_locality[0] +
2410                p->numa_faults_locality[1];
2411     runtime = numa_get_avg_runtime(p, &period);
2412 
2413     /* If the task is part of a group prevent parallel updates to group stats */
2414     ng = deref_curr_numa_group(p);
2415     if (ng) {
2416         group_lock = &ng->lock;
2417         spin_lock_irq(group_lock);
2418     }
2419 
2420     /* Find the node with the highest number of faults */
2421     for_each_online_node(nid) {
2422         /* Keep track of the offsets in numa_faults array */
2423         int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2424         unsigned long faults = 0, group_faults = 0;
2425         int priv;
2426 
2427         for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2428             long diff, f_diff, f_weight;
2429 
2430             mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2431             membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2432             cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2433             cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2434 
2435             /* Decay existing window, copy faults since last scan */
2436             diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2437             fault_types[priv] += p->numa_faults[membuf_idx];
2438             p->numa_faults[membuf_idx] = 0;
2439 
2440             /*
2441              * Normalize the faults_from, so all tasks in a group
2442              * count according to CPU use, instead of by the raw
2443              * number of faults. Tasks with little runtime have
2444              * little over-all impact on throughput, and thus their
2445              * faults are less important.
2446              */
2447             f_weight = div64_u64(runtime << 16, period + 1);
2448             f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2449                    (total_faults + 1);
2450             f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2451             p->numa_faults[cpubuf_idx] = 0;
2452 
2453             p->numa_faults[mem_idx] += diff;
2454             p->numa_faults[cpu_idx] += f_diff;
2455             faults += p->numa_faults[mem_idx];
2456             p->total_numa_faults += diff;
2457             if (ng) {
2458                 /*
2459                  * safe because we can only change our own group
2460                  *
2461                  * mem_idx represents the offset for a given
2462                  * nid and priv in a specific region because it
2463                  * is at the beginning of the numa_faults array.
2464                  */
2465                 ng->faults[mem_idx] += diff;
2466                 ng->faults[cpu_idx] += f_diff;
2467                 ng->total_faults += diff;
2468                 group_faults += ng->faults[mem_idx];
2469             }
2470         }
2471 
2472         if (!ng) {
2473             if (faults > max_faults) {
2474                 max_faults = faults;
2475                 max_nid = nid;
2476             }
2477         } else if (group_faults > max_faults) {
2478             max_faults = group_faults;
2479             max_nid = nid;
2480         }
2481     }
2482 
2483     /* Cannot migrate task to CPU-less node */
2484     if (max_nid != NUMA_NO_NODE && !node_state(max_nid, N_CPU)) {
2485         int near_nid = max_nid;
2486         int distance, near_distance = INT_MAX;
2487 
2488         for_each_node_state(nid, N_CPU) {
2489             distance = node_distance(max_nid, nid);
2490             if (distance < near_distance) {
2491                 near_nid = nid;
2492                 near_distance = distance;
2493             }
2494         }
2495         max_nid = near_nid;
2496     }
2497 
2498     if (ng) {
2499         numa_group_count_active_nodes(ng);
2500         spin_unlock_irq(group_lock);
2501         max_nid = preferred_group_nid(p, max_nid);
2502     }
2503 
2504     if (max_faults) {
2505         /* Set the new preferred node */
2506         if (max_nid != p->numa_preferred_nid)
2507             sched_setnuma(p, max_nid);
2508     }
2509 
2510     update_task_scan_period(p, fault_types[0], fault_types[1]);
2511 }
2512 
2513 static inline int get_numa_group(struct numa_group *grp)
2514 {
2515     return refcount_inc_not_zero(&grp->refcount);
2516 }
2517 
2518 static inline void put_numa_group(struct numa_group *grp)
2519 {
2520     if (refcount_dec_and_test(&grp->refcount))
2521         kfree_rcu(grp, rcu);
2522 }
2523 
2524 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2525             int *priv)
2526 {
2527     struct numa_group *grp, *my_grp;
2528     struct task_struct *tsk;
2529     bool join = false;
2530     int cpu = cpupid_to_cpu(cpupid);
2531     int i;
2532 
2533     if (unlikely(!deref_curr_numa_group(p))) {
2534         unsigned int size = sizeof(struct numa_group) +
2535                     NR_NUMA_HINT_FAULT_STATS *
2536                     nr_node_ids * sizeof(unsigned long);
2537 
2538         grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2539         if (!grp)
2540             return;
2541 
2542         refcount_set(&grp->refcount, 1);
2543         grp->active_nodes = 1;
2544         grp->max_faults_cpu = 0;
2545         spin_lock_init(&grp->lock);
2546         grp->gid = p->pid;
2547 
2548         for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2549             grp->faults[i] = p->numa_faults[i];
2550 
2551         grp->total_faults = p->total_numa_faults;
2552 
2553         grp->nr_tasks++;
2554         rcu_assign_pointer(p->numa_group, grp);
2555     }
2556 
2557     rcu_read_lock();
2558     tsk = READ_ONCE(cpu_rq(cpu)->curr);
2559 
2560     if (!cpupid_match_pid(tsk, cpupid))
2561         goto no_join;
2562 
2563     grp = rcu_dereference(tsk->numa_group);
2564     if (!grp)
2565         goto no_join;
2566 
2567     my_grp = deref_curr_numa_group(p);
2568     if (grp == my_grp)
2569         goto no_join;
2570 
2571     /*
2572      * Only join the other group if its bigger; if we're the bigger group,
2573      * the other task will join us.
2574      */
2575     if (my_grp->nr_tasks > grp->nr_tasks)
2576         goto no_join;
2577 
2578     /*
2579      * Tie-break on the grp address.
2580      */
2581     if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2582         goto no_join;
2583 
2584     /* Always join threads in the same process. */
2585     if (tsk->mm == current->mm)
2586         join = true;
2587 
2588     /* Simple filter to avoid false positives due to PID collisions */
2589     if (flags & TNF_SHARED)
2590         join = true;
2591 
2592     /* Update priv based on whether false sharing was detected */
2593     *priv = !join;
2594 
2595     if (join && !get_numa_group(grp))
2596         goto no_join;
2597 
2598     rcu_read_unlock();
2599 
2600     if (!join)
2601         return;
2602 
2603     BUG_ON(irqs_disabled());
2604     double_lock_irq(&my_grp->lock, &grp->lock);
2605 
2606     for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2607         my_grp->faults[i] -= p->numa_faults[i];
2608         grp->faults[i] += p->numa_faults[i];
2609     }
2610     my_grp->total_faults -= p->total_numa_faults;
2611     grp->total_faults += p->total_numa_faults;
2612 
2613     my_grp->nr_tasks--;
2614     grp->nr_tasks++;
2615 
2616     spin_unlock(&my_grp->lock);
2617     spin_unlock_irq(&grp->lock);
2618 
2619     rcu_assign_pointer(p->numa_group, grp);
2620 
2621     put_numa_group(my_grp);
2622     return;
2623 
2624 no_join:
2625     rcu_read_unlock();
2626     return;
2627 }
2628 
2629 /*
2630  * Get rid of NUMA statistics associated with a task (either current or dead).
2631  * If @final is set, the task is dead and has reached refcount zero, so we can
2632  * safely free all relevant data structures. Otherwise, there might be
2633  * concurrent reads from places like load balancing and procfs, and we should
2634  * reset the data back to default state without freeing ->numa_faults.
2635  */
2636 void task_numa_free(struct task_struct *p, bool final)
2637 {
2638     /* safe: p either is current or is being freed by current */
2639     struct numa_group *grp = rcu_dereference_raw(p->numa_group);
2640     unsigned long *numa_faults = p->numa_faults;
2641     unsigned long flags;
2642     int i;
2643 
2644     if (!numa_faults)
2645         return;
2646 
2647     if (grp) {
2648         spin_lock_irqsave(&grp->lock, flags);
2649         for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2650             grp->faults[i] -= p->numa_faults[i];
2651         grp->total_faults -= p->total_numa_faults;
2652 
2653         grp->nr_tasks--;
2654         spin_unlock_irqrestore(&grp->lock, flags);
2655         RCU_INIT_POINTER(p->numa_group, NULL);
2656         put_numa_group(grp);
2657     }
2658 
2659     if (final) {
2660         p->numa_faults = NULL;
2661         kfree(numa_faults);
2662     } else {
2663         p->total_numa_faults = 0;
2664         for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2665             numa_faults[i] = 0;
2666     }
2667 }
2668 
2669 /*
2670  * Got a PROT_NONE fault for a page on @node.
2671  */
2672 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2673 {
2674     struct task_struct *p = current;
2675     bool migrated = flags & TNF_MIGRATED;
2676     int cpu_node = task_node(current);
2677     int local = !!(flags & TNF_FAULT_LOCAL);
2678     struct numa_group *ng;
2679     int priv;
2680 
2681     if (!static_branch_likely(&sched_numa_balancing))
2682         return;
2683 
2684     /* for example, ksmd faulting in a user's mm */
2685     if (!p->mm)
2686         return;
2687 
2688     /* Allocate buffer to track faults on a per-node basis */
2689     if (unlikely(!p->numa_faults)) {
2690         int size = sizeof(*p->numa_faults) *
2691                NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2692 
2693         p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2694         if (!p->numa_faults)
2695             return;
2696 
2697         p->total_numa_faults = 0;
2698         memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2699     }
2700 
2701     /*
2702      * First accesses are treated as private, otherwise consider accesses
2703      * to be private if the accessing pid has not changed
2704      */
2705     if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2706         priv = 1;
2707     } else {
2708         priv = cpupid_match_pid(p, last_cpupid);
2709         if (!priv && !(flags & TNF_NO_GROUP))
2710             task_numa_group(p, last_cpupid, flags, &priv);
2711     }
2712 
2713     /*
2714      * If a workload spans multiple NUMA nodes, a shared fault that
2715      * occurs wholly within the set of nodes that the workload is
2716      * actively using should be counted as local. This allows the
2717      * scan rate to slow down when a workload has settled down.
2718      */
2719     ng = deref_curr_numa_group(p);
2720     if (!priv && !local && ng && ng->active_nodes > 1 &&
2721                 numa_is_active_node(cpu_node, ng) &&
2722                 numa_is_active_node(mem_node, ng))
2723         local = 1;
2724 
2725     /*
2726      * Retry to migrate task to preferred node periodically, in case it
2727      * previously failed, or the scheduler moved us.
2728      */
2729     if (time_after(jiffies, p->numa_migrate_retry)) {
2730         task_numa_placement(p);
2731         numa_migrate_preferred(p);
2732     }
2733 
2734     if (migrated)
2735         p->numa_pages_migrated += pages;
2736     if (flags & TNF_MIGRATE_FAIL)
2737         p->numa_faults_locality[2] += pages;
2738 
2739     p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2740     p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2741     p->numa_faults_locality[local] += pages;
2742 }
2743 
2744 static void reset_ptenuma_scan(struct task_struct *p)
2745 {
2746     /*
2747      * We only did a read acquisition of the mmap sem, so
2748      * p->mm->numa_scan_seq is written to without exclusive access
2749      * and the update is not guaranteed to be atomic. That's not
2750      * much of an issue though, since this is just used for
2751      * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2752      * expensive, to avoid any form of compiler optimizations:
2753      */
2754     WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2755     p->mm->numa_scan_offset = 0;
2756 }
2757 
2758 /*
2759  * The expensive part of numa migration is done from task_work context.
2760  * Triggered from task_tick_numa().
2761  */
2762 static void task_numa_work(struct callback_head *work)
2763 {
2764     unsigned long migrate, next_scan, now = jiffies;
2765     struct task_struct *p = current;
2766     struct mm_struct *mm = p->mm;
2767     u64 runtime = p->se.sum_exec_runtime;
2768     struct vm_area_struct *vma;
2769     unsigned long start, end;
2770     unsigned long nr_pte_updates = 0;
2771     long pages, virtpages;
2772 
2773     SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2774 
2775     work->next = work;
2776     /*
2777      * Who cares about NUMA placement when they're dying.
2778      *
2779      * NOTE: make sure not to dereference p->mm before this check,
2780      * exit_task_work() happens _after_ exit_mm() so we could be called
2781      * without p->mm even though we still had it when we enqueued this
2782      * work.
2783      */
2784     if (p->flags & PF_EXITING)
2785         return;
2786 
2787     if (!mm->numa_next_scan) {
2788         mm->numa_next_scan = now +
2789             msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2790     }
2791 
2792     /*
2793      * Enforce maximal scan/migration frequency..
2794      */
2795     migrate = mm->numa_next_scan;
2796     if (time_before(now, migrate))
2797         return;
2798 
2799     if (p->numa_scan_period == 0) {
2800         p->numa_scan_period_max = task_scan_max(p);
2801         p->numa_scan_period = task_scan_start(p);
2802     }
2803 
2804     next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2805     if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2806         return;
2807 
2808     /*
2809      * Delay this task enough that another task of this mm will likely win
2810      * the next time around.
2811      */
2812     p->node_stamp += 2 * TICK_NSEC;
2813 
2814     start = mm->numa_scan_offset;
2815     pages = sysctl_numa_balancing_scan_size;
2816     pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2817     virtpages = pages * 8;     /* Scan up to this much virtual space */
2818     if (!pages)
2819         return;
2820 
2821 
2822     if (!mmap_read_trylock(mm))
2823         return;
2824     vma = find_vma(mm, start);
2825     if (!vma) {
2826         reset_ptenuma_scan(p);
2827         start = 0;
2828         vma = mm->mmap;
2829     }
2830     for (; vma; vma = vma->vm_next) {
2831         if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2832             is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2833             continue;
2834         }
2835 
2836         /*
2837          * Shared library pages mapped by multiple processes are not
2838          * migrated as it is expected they are cache replicated. Avoid
2839          * hinting faults in read-only file-backed mappings or the vdso
2840          * as migrating the pages will be of marginal benefit.
2841          */
2842         if (!vma->vm_mm ||
2843             (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2844             continue;
2845 
2846         /*
2847          * Skip inaccessible VMAs to avoid any confusion between
2848          * PROT_NONE and NUMA hinting ptes
2849          */
2850         if (!vma_is_accessible(vma))
2851             continue;
2852 
2853         do {
2854             start = max(start, vma->vm_start);
2855             end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2856             end = min(end, vma->vm_end);
2857             nr_pte_updates = change_prot_numa(vma, start, end);
2858 
2859             /*
2860              * Try to scan sysctl_numa_balancing_size worth of
2861              * hpages that have at least one present PTE that
2862              * is not already pte-numa. If the VMA contains
2863              * areas that are unused or already full of prot_numa
2864              * PTEs, scan up to virtpages, to skip through those
2865              * areas faster.
2866              */
2867             if (nr_pte_updates)
2868                 pages -= (end - start) >> PAGE_SHIFT;
2869             virtpages -= (end - start) >> PAGE_SHIFT;
2870 
2871             start = end;
2872             if (pages <= 0 || virtpages <= 0)
2873                 goto out;
2874 
2875             cond_resched();
2876         } while (end != vma->vm_end);
2877     }
2878 
2879 out:
2880     /*
2881      * It is possible to reach the end of the VMA list but the last few
2882      * VMAs are not guaranteed to the vma_migratable. If they are not, we
2883      * would find the !migratable VMA on the next scan but not reset the
2884      * scanner to the start so check it now.
2885      */
2886     if (vma)
2887         mm->numa_scan_offset = start;
2888     else
2889         reset_ptenuma_scan(p);
2890     mmap_read_unlock(mm);
2891 
2892     /*
2893      * Make sure tasks use at least 32x as much time to run other code
2894      * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2895      * Usually update_task_scan_period slows down scanning enough; on an
2896      * overloaded system we need to limit overhead on a per task basis.
2897      */
2898     if (unlikely(p->se.sum_exec_runtime != runtime)) {
2899         u64 diff = p->se.sum_exec_runtime - runtime;
2900         p->node_stamp += 32 * diff;
2901     }
2902 }
2903 
2904 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
2905 {
2906     int mm_users = 0;
2907     struct mm_struct *mm = p->mm;
2908 
2909     if (mm) {
2910         mm_users = atomic_read(&mm->mm_users);
2911         if (mm_users == 1) {
2912             mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2913             mm->numa_scan_seq = 0;
2914         }
2915     }
2916     p->node_stamp           = 0;
2917     p->numa_scan_seq        = mm ? mm->numa_scan_seq : 0;
2918     p->numa_scan_period     = sysctl_numa_balancing_scan_delay;
2919     p->numa_migrate_retry       = 0;
2920     /* Protect against double add, see task_tick_numa and task_numa_work */
2921     p->numa_work.next       = &p->numa_work;
2922     p->numa_faults          = NULL;
2923     p->numa_pages_migrated      = 0;
2924     p->total_numa_faults        = 0;
2925     RCU_INIT_POINTER(p->numa_group, NULL);
2926     p->last_task_numa_placement = 0;
2927     p->last_sum_exec_runtime    = 0;
2928 
2929     init_task_work(&p->numa_work, task_numa_work);
2930 
2931     /* New address space, reset the preferred nid */
2932     if (!(clone_flags & CLONE_VM)) {
2933         p->numa_preferred_nid = NUMA_NO_NODE;
2934         return;
2935     }
2936 
2937     /*
2938      * New thread, keep existing numa_preferred_nid which should be copied
2939      * already by arch_dup_task_struct but stagger when scans start.
2940      */
2941     if (mm) {
2942         unsigned int delay;
2943 
2944         delay = min_t(unsigned int, task_scan_max(current),
2945             current->numa_scan_period * mm_users * NSEC_PER_MSEC);
2946         delay += 2 * TICK_NSEC;
2947         p->node_stamp = delay;
2948     }
2949 }
2950 
2951 /*
2952  * Drive the periodic memory faults..
2953  */
2954 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2955 {
2956     struct callback_head *work = &curr->numa_work;
2957     u64 period, now;
2958 
2959     /*
2960      * We don't care about NUMA placement if we don't have memory.
2961      */
2962     if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
2963         return;
2964 
2965     /*
2966      * Using runtime rather than walltime has the dual advantage that
2967      * we (mostly) drive the selection from busy threads and that the
2968      * task needs to have done some actual work before we bother with
2969      * NUMA placement.
2970      */
2971     now = curr->se.sum_exec_runtime;
2972     period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2973 
2974     if (now > curr->node_stamp + period) {
2975         if (!curr->node_stamp)
2976             curr->numa_scan_period = task_scan_start(curr);
2977         curr->node_stamp += period;
2978 
2979         if (!time_before(jiffies, curr->mm->numa_next_scan))
2980             task_work_add(curr, work, TWA_RESUME);
2981     }
2982 }
2983 
2984 static void update_scan_period(struct task_struct *p, int new_cpu)
2985 {
2986     int src_nid = cpu_to_node(task_cpu(p));
2987     int dst_nid = cpu_to_node(new_cpu);
2988 
2989     if (!static_branch_likely(&sched_numa_balancing))
2990         return;
2991 
2992     if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
2993         return;
2994 
2995     if (src_nid == dst_nid)
2996         return;
2997 
2998     /*
2999      * Allow resets if faults have been trapped before one scan
3000      * has completed. This is most likely due to a new task that
3001      * is pulled cross-node due to wakeups or load balancing.
3002      */
3003     if (p->numa_scan_seq) {
3004         /*
3005          * Avoid scan adjustments if moving to the preferred
3006          * node or if the task was not previously running on
3007          * the preferred node.
3008          */
3009         if (dst_nid == p->numa_preferred_nid ||
3010             (p->numa_preferred_nid != NUMA_NO_NODE &&
3011             src_nid != p->numa_preferred_nid))
3012             return;
3013     }
3014 
3015     p->numa_scan_period = task_scan_start(p);
3016 }
3017 
3018 #else
3019 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3020 {
3021 }
3022 
3023 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
3024 {
3025 }
3026 
3027 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
3028 {
3029 }
3030 
3031 static inline void update_scan_period(struct task_struct *p, int new_cpu)
3032 {
3033 }
3034 
3035 #endif /* CONFIG_NUMA_BALANCING */
3036 
3037 static void
3038 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3039 {
3040     update_load_add(&cfs_rq->load, se->load.weight);
3041 #ifdef CONFIG_SMP
3042     if (entity_is_task(se)) {
3043         struct rq *rq = rq_of(cfs_rq);
3044 
3045         account_numa_enqueue(rq, task_of(se));
3046         list_add(&se->group_node, &rq->cfs_tasks);
3047     }
3048 #endif
3049     cfs_rq->nr_running++;
3050     if (se_is_idle(se))
3051         cfs_rq->idle_nr_running++;
3052 }
3053 
3054 static void
3055 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3056 {
3057     update_load_sub(&cfs_rq->load, se->load.weight);
3058 #ifdef CONFIG_SMP
3059     if (entity_is_task(se)) {
3060         account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3061         list_del_init(&se->group_node);
3062     }
3063 #endif
3064     cfs_rq->nr_running--;
3065     if (se_is_idle(se))
3066         cfs_rq->idle_nr_running--;
3067 }
3068 
3069 /*
3070  * Signed add and clamp on underflow.
3071  *
3072  * Explicitly do a load-store to ensure the intermediate value never hits
3073  * memory. This allows lockless observations without ever seeing the negative
3074  * values.
3075  */
3076 #define add_positive(_ptr, _val) do {                           \
3077     typeof(_ptr) ptr = (_ptr);                              \
3078     typeof(_val) val = (_val);                              \
3079     typeof(*ptr) res, var = READ_ONCE(*ptr);                \
3080                                 \
3081     res = var + val;                                        \
3082                                 \
3083     if (val < 0 && res > var)                               \
3084         res = 0;                                        \
3085                                 \
3086     WRITE_ONCE(*ptr, res);                                  \
3087 } while (0)
3088 
3089 /*
3090  * Unsigned subtract and clamp on underflow.
3091  *
3092  * Explicitly do a load-store to ensure the intermediate value never hits
3093  * memory. This allows lockless observations without ever seeing the negative
3094  * values.
3095  */
3096 #define sub_positive(_ptr, _val) do {               \
3097     typeof(_ptr) ptr = (_ptr);              \
3098     typeof(*ptr) val = (_val);              \
3099     typeof(*ptr) res, var = READ_ONCE(*ptr);        \
3100     res = var - val;                    \
3101     if (res > var)                      \
3102         res = 0;                    \
3103     WRITE_ONCE(*ptr, res);                  \
3104 } while (0)
3105 
3106 /*
3107  * Remove and clamp on negative, from a local variable.
3108  *
3109  * A variant of sub_positive(), which does not use explicit load-store
3110  * and is thus optimized for local variable updates.
3111  */
3112 #define lsub_positive(_ptr, _val) do {              \
3113     typeof(_ptr) ptr = (_ptr);              \
3114     *ptr -= min_t(typeof(*ptr), *ptr, _val);        \
3115 } while (0)
3116 
3117 #ifdef CONFIG_SMP
3118 static inline void
3119 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3120 {
3121     cfs_rq->avg.load_avg += se->avg.load_avg;
3122     cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3123 }
3124 
3125 static inline void
3126 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3127 {
3128     sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3129     sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3130     /* See update_cfs_rq_load_avg() */
3131     cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
3132                       cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3133 }
3134 #else
3135 static inline void
3136 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3137 static inline void
3138 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3139 #endif
3140 
3141 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
3142                 unsigned long weight)
3143 {
3144     if (se->on_rq) {
3145         /* commit outstanding execution time */
3146         if (cfs_rq->curr == se)
3147             update_curr(cfs_rq);
3148         update_load_sub(&cfs_rq->load, se->load.weight);
3149     }
3150     dequeue_load_avg(cfs_rq, se);
3151 
3152     update_load_set(&se->load, weight);
3153 
3154 #ifdef CONFIG_SMP
3155     do {
3156         u32 divider = get_pelt_divider(&se->avg);
3157 
3158         se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3159     } while (0);
3160 #endif
3161 
3162     enqueue_load_avg(cfs_rq, se);
3163     if (se->on_rq)
3164         update_load_add(&cfs_rq->load, se->load.weight);
3165 
3166 }
3167 
3168 void reweight_task(struct task_struct *p, int prio)
3169 {
3170     struct sched_entity *se = &p->se;
3171     struct cfs_rq *cfs_rq = cfs_rq_of(se);
3172     struct load_weight *load = &se->load;
3173     unsigned long weight = scale_load(sched_prio_to_weight[prio]);
3174 
3175     reweight_entity(cfs_rq, se, weight);
3176     load->inv_weight = sched_prio_to_wmult[prio];
3177 }
3178 
3179 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3180 
3181 #ifdef CONFIG_FAIR_GROUP_SCHED
3182 #ifdef CONFIG_SMP
3183 /*
3184  * All this does is approximate the hierarchical proportion which includes that
3185  * global sum we all love to hate.
3186  *
3187  * That is, the weight of a group entity, is the proportional share of the
3188  * group weight based on the group runqueue weights. That is:
3189  *
3190  *                     tg->weight * grq->load.weight
3191  *   ge->load.weight = -----------------------------               (1)
3192  *                       \Sum grq->load.weight
3193  *
3194  * Now, because computing that sum is prohibitively expensive to compute (been
3195  * there, done that) we approximate it with this average stuff. The average
3196  * moves slower and therefore the approximation is cheaper and more stable.
3197  *
3198  * So instead of the above, we substitute:
3199  *
3200  *   grq->load.weight -> grq->avg.load_avg                         (2)
3201  *
3202  * which yields the following:
3203  *
3204  *                     tg->weight * grq->avg.load_avg
3205  *   ge->load.weight = ------------------------------              (3)
3206  *                             tg->load_avg
3207  *
3208  * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3209  *
3210  * That is shares_avg, and it is right (given the approximation (2)).
3211  *
3212  * The problem with it is that because the average is slow -- it was designed
3213  * to be exactly that of course -- this leads to transients in boundary
3214  * conditions. In specific, the case where the group was idle and we start the
3215  * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3216  * yielding bad latency etc..
3217  *
3218  * Now, in that special case (1) reduces to:
3219  *
3220  *                     tg->weight * grq->load.weight
3221  *   ge->load.weight = ----------------------------- = tg->weight   (4)
3222  *                         grp->load.weight
3223  *
3224  * That is, the sum collapses because all other CPUs are idle; the UP scenario.
3225  *
3226  * So what we do is modify our approximation (3) to approach (4) in the (near)
3227  * UP case, like:
3228  *
3229  *   ge->load.weight =
3230  *
3231  *              tg->weight * grq->load.weight
3232  *     ---------------------------------------------------         (5)
3233  *     tg->load_avg - grq->avg.load_avg + grq->load.weight
3234  *
3235  * But because grq->load.weight can drop to 0, resulting in a divide by zero,
3236  * we need to use grq->avg.load_avg as its lower bound, which then gives:
3237  *
3238  *
3239  *                     tg->weight * grq->load.weight
3240  *   ge->load.weight = -----------------------------           (6)
3241  *                             tg_load_avg'
3242  *
3243  * Where:
3244  *
3245  *   tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3246  *                  max(grq->load.weight, grq->avg.load_avg)
3247  *
3248  * And that is shares_weight and is icky. In the (near) UP case it approaches
3249  * (4) while in the normal case it approaches (3). It consistently
3250  * overestimates the ge->load.weight and therefore:
3251  *
3252  *   \Sum ge->load.weight >= tg->weight
3253  *
3254  * hence icky!
3255  */
3256 static long calc_group_shares(struct cfs_rq *cfs_rq)
3257 {
3258     long tg_weight, tg_shares, load, shares;
3259     struct task_group *tg = cfs_rq->tg;
3260 
3261     tg_shares = READ_ONCE(tg->shares);
3262 
3263     load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3264 
3265     tg_weight = atomic_long_read(&tg->load_avg);
3266 
3267     /* Ensure tg_weight >= load */
3268     tg_weight -= cfs_rq->tg_load_avg_contrib;
3269     tg_weight += load;
3270 
3271     shares = (tg_shares * load);
3272     if (tg_weight)
3273         shares /= tg_weight;
3274 
3275     /*
3276      * MIN_SHARES has to be unscaled here to support per-CPU partitioning
3277      * of a group with small tg->shares value. It is a floor value which is
3278      * assigned as a minimum load.weight to the sched_entity representing
3279      * the group on a CPU.
3280      *
3281      * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
3282      * on an 8-core system with 8 tasks each runnable on one CPU shares has
3283      * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
3284      * case no task is runnable on a CPU MIN_SHARES=2 should be returned
3285      * instead of 0.
3286      */
3287     return clamp_t(long, shares, MIN_SHARES, tg_shares);
3288 }
3289 #endif /* CONFIG_SMP */
3290 
3291 /*
3292  * Recomputes the group entity based on the current state of its group
3293  * runqueue.
3294  */
3295 static void update_cfs_group(struct sched_entity *se)
3296 {
3297     struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3298     long shares;
3299 
3300     if (!gcfs_rq)
3301         return;
3302 
3303     if (throttled_hierarchy(gcfs_rq))
3304         return;
3305 
3306 #ifndef CONFIG_SMP
3307     shares = READ_ONCE(gcfs_rq->tg->shares);
3308 
3309     if (likely(se->load.weight == shares))
3310         return;
3311 #else
3312     shares   = calc_group_shares(gcfs_rq);
3313 #endif
3314 
3315     reweight_entity(cfs_rq_of(se), se, shares);
3316 }
3317 
3318 #else /* CONFIG_FAIR_GROUP_SCHED */
3319 static inline void update_cfs_group(struct sched_entity *se)
3320 {
3321 }
3322 #endif /* CONFIG_FAIR_GROUP_SCHED */
3323 
3324 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
3325 {
3326     struct rq *rq = rq_of(cfs_rq);
3327 
3328     if (&rq->cfs == cfs_rq) {
3329         /*
3330          * There are a few boundary cases this might miss but it should
3331          * get called often enough that that should (hopefully) not be
3332          * a real problem.
3333          *
3334          * It will not get called when we go idle, because the idle
3335          * thread is a different class (!fair), nor will the utilization
3336          * number include things like RT tasks.
3337          *
3338          * As is, the util number is not freq-invariant (we'd have to
3339          * implement arch_scale_freq_capacity() for that).
3340          *
3341          * See cpu_util_cfs().
3342          */
3343         cpufreq_update_util(rq, flags);
3344     }
3345 }
3346 
3347 #ifdef CONFIG_SMP
3348 static inline bool load_avg_is_decayed(struct sched_avg *sa)
3349 {
3350     if (sa->load_sum)
3351         return false;
3352 
3353     if (sa->util_sum)
3354         return false;
3355 
3356     if (sa->runnable_sum)
3357         return false;
3358 
3359     /*
3360      * _avg must be null when _sum are null because _avg = _sum / divider
3361      * Make sure that rounding and/or propagation of PELT values never
3362      * break this.
3363      */
3364     SCHED_WARN_ON(sa->load_avg ||
3365               sa->util_avg ||
3366               sa->runnable_avg);
3367 
3368     return true;
3369 }
3370 
3371 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3372 {
3373     return u64_u32_load_copy(cfs_rq->avg.last_update_time,
3374                  cfs_rq->last_update_time_copy);
3375 }
3376 #ifdef CONFIG_FAIR_GROUP_SCHED
3377 /*
3378  * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
3379  * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
3380  * bottom-up, we only have to test whether the cfs_rq before us on the list
3381  * is our child.
3382  * If cfs_rq is not on the list, test whether a child needs its to be added to
3383  * connect a branch to the tree  * (see list_add_leaf_cfs_rq() for details).
3384  */
3385 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
3386 {
3387     struct cfs_rq *prev_cfs_rq;
3388     struct list_head *prev;
3389 
3390     if (cfs_rq->on_list) {
3391         prev = cfs_rq->leaf_cfs_rq_list.prev;
3392     } else {
3393         struct rq *rq = rq_of(cfs_rq);
3394 
3395         prev = rq->tmp_alone_branch;
3396     }
3397 
3398     prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
3399 
3400     return (prev_cfs_rq->tg->parent == cfs_rq->tg);
3401 }
3402 
3403 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
3404 {
3405     if (cfs_rq->load.weight)
3406         return false;
3407 
3408     if (!load_avg_is_decayed(&cfs_rq->avg))
3409         return false;
3410 
3411     if (child_cfs_rq_on_list(cfs_rq))
3412         return false;
3413 
3414     return true;
3415 }
3416 
3417 /**
3418  * update_tg_load_avg - update the tg's load avg
3419  * @cfs_rq: the cfs_rq whose avg changed
3420  *
3421  * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3422  * However, because tg->load_avg is a global value there are performance
3423  * considerations.
3424  *
3425  * In order to avoid having to look at the other cfs_rq's, we use a
3426  * differential update where we store the last value we propagated. This in
3427  * turn allows skipping updates if the differential is 'small'.
3428  *
3429  * Updating tg's load_avg is necessary before update_cfs_share().
3430  */
3431 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
3432 {
3433     long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
3434 
3435     /*
3436      * No need to update load_avg for root_task_group as it is not used.
3437      */
3438     if (cfs_rq->tg == &root_task_group)
3439         return;
3440 
3441     if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3442         atomic_long_add(delta, &cfs_rq->tg->load_avg);
3443         cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
3444     }
3445 }
3446 
3447 /*
3448  * Called within set_task_rq() right before setting a task's CPU. The
3449  * caller only guarantees p->pi_lock is held; no other assumptions,
3450  * including the state of rq->lock, should be made.
3451  */
3452 void set_task_rq_fair(struct sched_entity *se,
3453               struct cfs_rq *prev, struct cfs_rq *next)
3454 {
3455     u64 p_last_update_time;
3456     u64 n_last_update_time;
3457 
3458     if (!sched_feat(ATTACH_AGE_LOAD))
3459         return;
3460 
3461     /*
3462      * We are supposed to update the task to "current" time, then its up to
3463      * date and ready to go to new CPU/cfs_rq. But we have difficulty in
3464      * getting what current time is, so simply throw away the out-of-date
3465      * time. This will result in the wakee task is less decayed, but giving
3466      * the wakee more load sounds not bad.
3467      */
3468     if (!(se->avg.last_update_time && prev))
3469         return;
3470 
3471     p_last_update_time = cfs_rq_last_update_time(prev);
3472     n_last_update_time = cfs_rq_last_update_time(next);
3473 
3474     __update_load_avg_blocked_se(p_last_update_time, se);
3475     se->avg.last_update_time = n_last_update_time;
3476 }
3477 
3478 /*
3479  * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
3480  * propagate its contribution. The key to this propagation is the invariant
3481  * that for each group:
3482  *
3483  *   ge->avg == grq->avg                        (1)
3484  *
3485  * _IFF_ we look at the pure running and runnable sums. Because they
3486  * represent the very same entity, just at different points in the hierarchy.
3487  *
3488  * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
3489  * and simply copies the running/runnable sum over (but still wrong, because
3490  * the group entity and group rq do not have their PELT windows aligned).
3491  *
3492  * However, update_tg_cfs_load() is more complex. So we have:
3493  *
3494  *   ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg      (2)
3495  *
3496  * And since, like util, the runnable part should be directly transferable,
3497  * the following would _appear_ to be the straight forward approach:
3498  *
3499  *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg   (3)
3500  *
3501  * And per (1) we have:
3502  *
3503  *   ge->avg.runnable_avg == grq->avg.runnable_avg
3504  *
3505  * Which gives:
3506  *
3507  *                      ge->load.weight * grq->avg.load_avg
3508  *   ge->avg.load_avg = -----------------------------------     (4)
3509  *                               grq->load.weight
3510  *
3511  * Except that is wrong!
3512  *
3513  * Because while for entities historical weight is not important and we
3514  * really only care about our future and therefore can consider a pure
3515  * runnable sum, runqueues can NOT do this.
3516  *
3517  * We specifically want runqueues to have a load_avg that includes
3518  * historical weights. Those represent the blocked load, the load we expect
3519  * to (shortly) return to us. This only works by keeping the weights as
3520  * integral part of the sum. We therefore cannot decompose as per (3).
3521  *
3522  * Another reason this doesn't work is that runnable isn't a 0-sum entity.
3523  * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
3524  * rq itself is runnable anywhere between 2/3 and 1 depending on how the
3525  * runnable section of these tasks overlap (or not). If they were to perfectly
3526  * align the rq as a whole would be runnable 2/3 of the time. If however we
3527  * always have at least 1 runnable task, the rq as a whole is always runnable.
3528  *
3529  * So we'll have to approximate.. :/
3530  *
3531  * Given the constraint:
3532  *
3533  *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
3534  *
3535  * We can construct a rule that adds runnable to a rq by assuming minimal
3536  * overlap.
3537  *
3538  * On removal, we'll assume each task is equally runnable; which yields:
3539  *
3540  *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
3541  *
3542  * XXX: only do this for the part of runnable > running ?
3543  *
3544  */
3545 static inline void
3546 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3547 {
3548     long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
3549     u32 new_sum, divider;
3550 
3551     /* Nothing to update */
3552     if (!delta_avg)
3553         return;
3554 
3555     /*
3556      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3557      * See ___update_load_avg() for details.
3558      */
3559     divider = get_pelt_divider(&cfs_rq->avg);
3560 
3561 
3562     /* Set new sched_entity's utilization */
3563     se->avg.util_avg = gcfs_rq->avg.util_avg;
3564     new_sum = se->avg.util_avg * divider;
3565     delta_sum = (long)new_sum - (long)se->avg.util_sum;
3566     se->avg.util_sum = new_sum;
3567 
3568     /* Update parent cfs_rq utilization */
3569     add_positive(&cfs_rq->avg.util_avg, delta_avg);
3570     add_positive(&cfs_rq->avg.util_sum, delta_sum);
3571 
3572     /* See update_cfs_rq_load_avg() */
3573     cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
3574                       cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
3575 }
3576 
3577 static inline void
3578 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3579 {
3580     long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
3581     u32 new_sum, divider;
3582 
3583     /* Nothing to update */
3584     if (!delta_avg)
3585         return;
3586 
3587     /*
3588      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3589      * See ___update_load_avg() for details.
3590      */
3591     divider = get_pelt_divider(&cfs_rq->avg);
3592 
3593     /* Set new sched_entity's runnable */
3594     se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
3595     new_sum = se->avg.runnable_avg * divider;
3596     delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
3597     se->avg.runnable_sum = new_sum;
3598 
3599     /* Update parent cfs_rq runnable */
3600     add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
3601     add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
3602     /* See update_cfs_rq_load_avg() */
3603     cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
3604                           cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
3605 }
3606 
3607 static inline void
3608 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3609 {
3610     long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
3611     unsigned long load_avg;
3612     u64 load_sum = 0;
3613     s64 delta_sum;
3614     u32 divider;
3615 
3616     if (!runnable_sum)
3617         return;
3618 
3619     gcfs_rq->prop_runnable_sum = 0;
3620 
3621     /*
3622      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3623      * See ___update_load_avg() for details.
3624      */
3625     divider = get_pelt_divider(&cfs_rq->avg);
3626 
3627     if (runnable_sum >= 0) {
3628         /*
3629          * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
3630          * the CPU is saturated running == runnable.
3631          */
3632         runnable_sum += se->avg.load_sum;
3633         runnable_sum = min_t(long, runnable_sum, divider);
3634     } else {
3635         /*
3636          * Estimate the new unweighted runnable_sum of the gcfs_rq by
3637          * assuming all tasks are equally runnable.
3638          */
3639         if (scale_load_down(gcfs_rq->load.weight)) {
3640             load_sum = div_u64(gcfs_rq->avg.load_sum,
3641                 scale_load_down(gcfs_rq->load.weight));
3642         }
3643 
3644         /* But make sure to not inflate se's runnable */
3645         runnable_sum = min(se->avg.load_sum, load_sum);
3646     }
3647 
3648     /*
3649      * runnable_sum can't be lower than running_sum
3650      * Rescale running sum to be in the same range as runnable sum
3651      * running_sum is in [0 : LOAD_AVG_MAX <<  SCHED_CAPACITY_SHIFT]
3652      * runnable_sum is in [0 : LOAD_AVG_MAX]
3653      */
3654     running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
3655     runnable_sum = max(runnable_sum, running_sum);
3656 
3657     load_sum = se_weight(se) * runnable_sum;
3658     load_avg = div_u64(load_sum, divider);
3659 
3660     delta_avg = load_avg - se->avg.load_avg;
3661     if (!delta_avg)
3662         return;
3663 
3664     delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
3665 
3666     se->avg.load_sum = runnable_sum;
3667     se->avg.load_avg = load_avg;
3668     add_positive(&cfs_rq->avg.load_avg, delta_avg);
3669     add_positive(&cfs_rq->avg.load_sum, delta_sum);
3670     /* See update_cfs_rq_load_avg() */
3671     cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
3672                       cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3673 }
3674 
3675 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
3676 {
3677     cfs_rq->propagate = 1;
3678     cfs_rq->prop_runnable_sum += runnable_sum;
3679 }
3680 
3681 /* Update task and its cfs_rq load average */
3682 static inline int propagate_entity_load_avg(struct sched_entity *se)
3683 {
3684     struct cfs_rq *cfs_rq, *gcfs_rq;
3685 
3686     if (entity_is_task(se))
3687         return 0;
3688 
3689     gcfs_rq = group_cfs_rq(se);
3690     if (!gcfs_rq->propagate)
3691         return 0;
3692 
3693     gcfs_rq->propagate = 0;
3694 
3695     cfs_rq = cfs_rq_of(se);
3696 
3697     add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
3698 
3699     update_tg_cfs_util(cfs_rq, se, gcfs_rq);
3700     update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
3701     update_tg_cfs_load(cfs_rq, se, gcfs_rq);
3702 
3703     trace_pelt_cfs_tp(cfs_rq);
3704     trace_pelt_se_tp(se);
3705 
3706     return 1;
3707 }
3708 
3709 /*
3710  * Check if we need to update the load and the utilization of a blocked
3711  * group_entity:
3712  */
3713 static inline bool skip_blocked_update(struct sched_entity *se)
3714 {
3715     struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3716 
3717     /*
3718      * If sched_entity still have not zero load or utilization, we have to
3719      * decay it:
3720      */
3721     if (se->avg.load_avg || se->avg.util_avg)
3722         return false;
3723 
3724     /*
3725      * If there is a pending propagation, we have to update the load and
3726      * the utilization of the sched_entity:
3727      */
3728     if (gcfs_rq->propagate)
3729         return false;
3730 
3731     /*
3732      * Otherwise, the load and the utilization of the sched_entity is
3733      * already zero and there is no pending propagation, so it will be a
3734      * waste of time to try to decay it:
3735      */
3736     return true;
3737 }
3738 
3739 #else /* CONFIG_FAIR_GROUP_SCHED */
3740 
3741 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
3742 
3743 static inline int propagate_entity_load_avg(struct sched_entity *se)
3744 {
3745     return 0;
3746 }
3747 
3748 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
3749 
3750 #endif /* CONFIG_FAIR_GROUP_SCHED */
3751 
3752 #ifdef CONFIG_NO_HZ_COMMON
3753 static inline void migrate_se_pelt_lag(struct sched_entity *se)
3754 {
3755     u64 throttled = 0, now, lut;
3756     struct cfs_rq *cfs_rq;
3757     struct rq *rq;
3758     bool is_idle;
3759 
3760     if (load_avg_is_decayed(&se->avg))
3761         return;
3762 
3763     cfs_rq = cfs_rq_of(se);
3764     rq = rq_of(cfs_rq);
3765 
3766     rcu_read_lock();
3767     is_idle = is_idle_task(rcu_dereference(rq->curr));
3768     rcu_read_unlock();
3769 
3770     /*
3771      * The lag estimation comes with a cost we don't want to pay all the
3772      * time. Hence, limiting to the case where the source CPU is idle and
3773      * we know we are at the greatest risk to have an outdated clock.
3774      */
3775     if (!is_idle)
3776         return;
3777 
3778     /*
3779      * Estimated "now" is: last_update_time + cfs_idle_lag + rq_idle_lag, where:
3780      *
3781      *   last_update_time (the cfs_rq's last_update_time)
3782      *  = cfs_rq_clock_pelt()@cfs_rq_idle
3783      *      = rq_clock_pelt()@cfs_rq_idle
3784      *        - cfs->throttled_clock_pelt_time@cfs_rq_idle
3785      *
3786      *   cfs_idle_lag (delta between rq's update and cfs_rq's update)
3787      *      = rq_clock_pelt()@rq_idle - rq_clock_pelt()@cfs_rq_idle
3788      *
3789      *   rq_idle_lag (delta between now and rq's update)
3790      *      = sched_clock_cpu() - rq_clock()@rq_idle
3791      *
3792      * We can then write:
3793      *
3794      *    now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +
3795      *          sched_clock_cpu() - rq_clock()@rq_idle
3796      * Where:
3797      *      rq_clock_pelt()@rq_idle is rq->clock_pelt_idle
3798      *      rq_clock()@rq_idle      is rq->clock_idle
3799      *      cfs->throttled_clock_pelt_time@cfs_rq_idle
3800      *                              is cfs_rq->throttled_pelt_idle
3801      */
3802 
3803 #ifdef CONFIG_CFS_BANDWIDTH
3804     throttled = u64_u32_load(cfs_rq->throttled_pelt_idle);
3805     /* The clock has been stopped for throttling */
3806     if (throttled == U64_MAX)
3807         return;
3808 #endif
3809     now = u64_u32_load(rq->clock_pelt_idle);
3810     /*
3811      * Paired with _update_idle_rq_clock_pelt(). It ensures at the worst case
3812      * is observed the old clock_pelt_idle value and the new clock_idle,
3813      * which lead to an underestimation. The opposite would lead to an
3814      * overestimation.
3815      */
3816     smp_rmb();
3817     lut = cfs_rq_last_update_time(cfs_rq);
3818 
3819     now -= throttled;
3820     if (now < lut)
3821         /*
3822          * cfs_rq->avg.last_update_time is more recent than our
3823          * estimation, let's use it.
3824          */
3825         now = lut;
3826     else
3827         now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
3828 
3829     __update_load_avg_blocked_se(now, se);
3830 }
3831 #else
3832 static void migrate_se_pelt_lag(struct sched_entity *se) {}
3833 #endif
3834 
3835 /**
3836  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3837  * @now: current time, as per cfs_rq_clock_pelt()
3838  * @cfs_rq: cfs_rq to update
3839  *
3840  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3841  * avg. The immediate corollary is that all (fair) tasks must be attached, see
3842  * post_init_entity_util_avg().
3843  *
3844  * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3845  *
3846  * Return: true if the load decayed or we removed load.
3847  *
3848  * Since both these conditions indicate a changed cfs_rq->avg.load we should
3849  * call update_tg_load_avg() when this function returns true.
3850  */
3851 static inline int
3852 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
3853 {
3854     unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
3855     struct sched_avg *sa = &cfs_rq->avg;
3856     int decayed = 0;
3857 
3858     if (cfs_rq->removed.nr) {
3859         unsigned long r;
3860         u32 divider = get_pelt_divider(&cfs_rq->avg);
3861 
3862         raw_spin_lock(&cfs_rq->removed.lock);
3863         swap(cfs_rq->removed.util_avg, removed_util);
3864         swap(cfs_rq->removed.load_avg, removed_load);
3865         swap(cfs_rq->removed.runnable_avg, removed_runnable);
3866         cfs_rq->removed.nr = 0;
3867         raw_spin_unlock(&cfs_rq->removed.lock);
3868 
3869         r = removed_load;
3870         sub_positive(&sa->load_avg, r);
3871         sub_positive(&sa->load_sum, r * divider);
3872         /* See sa->util_sum below */
3873         sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
3874 
3875         r = removed_util;
3876         sub_positive(&sa->util_avg, r);
3877         sub_positive(&sa->util_sum, r * divider);
3878         /*
3879          * Because of rounding, se->util_sum might ends up being +1 more than
3880          * cfs->util_sum. Although this is not a problem by itself, detaching
3881          * a lot of tasks with the rounding problem between 2 updates of
3882          * util_avg (~1ms) can make cfs->util_sum becoming null whereas
3883          * cfs_util_avg is not.
3884          * Check that util_sum is still above its lower bound for the new
3885          * util_avg. Given that period_contrib might have moved since the last
3886          * sync, we are only sure that util_sum must be above or equal to
3887          *    util_avg * minimum possible divider
3888          */
3889         sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
3890 
3891         r = removed_runnable;
3892         sub_positive(&sa->runnable_avg, r);
3893         sub_positive(&sa->runnable_sum, r * divider);
3894         /* See sa->util_sum above */
3895         sa->runnable_sum = max_t(u32, sa->runnable_sum,
3896                           sa->runnable_avg * PELT_MIN_DIVIDER);
3897 
3898         /*
3899          * removed_runnable is the unweighted version of removed_load so we
3900          * can use it to estimate removed_load_sum.
3901          */
3902         add_tg_cfs_propagate(cfs_rq,
3903             -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
3904 
3905         decayed = 1;
3906     }
3907 
3908     decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
3909     u64_u32_store_copy(sa->last_update_time,
3910                cfs_rq->last_update_time_copy,
3911                sa->last_update_time);
3912     return decayed;
3913 }
3914 
3915 /**
3916  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3917  * @cfs_rq: cfs_rq to attach to
3918  * @se: sched_entity to attach
3919  *
3920  * Must call update_cfs_rq_load_avg() before this, since we rely on
3921  * cfs_rq->avg.last_update_time being current.
3922  */
3923 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3924 {
3925     /*
3926      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3927      * See ___update_load_avg() for details.
3928      */
3929     u32 divider = get_pelt_divider(&cfs_rq->avg);
3930 
3931     /*
3932      * When we attach the @se to the @cfs_rq, we must align the decay
3933      * window because without that, really weird and wonderful things can
3934      * happen.
3935      *
3936      * XXX illustrate
3937      */
3938     se->avg.last_update_time = cfs_rq->avg.last_update_time;
3939     se->avg.period_contrib = cfs_rq->avg.period_contrib;
3940 
3941     /*
3942      * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
3943      * period_contrib. This isn't strictly correct, but since we're
3944      * entirely outside of the PELT hierarchy, nobody cares if we truncate
3945      * _sum a little.
3946      */
3947     se->avg.util_sum = se->avg.util_avg * divider;
3948 
3949     se->avg.runnable_sum = se->avg.runnable_avg * divider;
3950 
3951     se->avg.load_sum = se->avg.load_avg * divider;
3952     if (se_weight(se) < se->avg.load_sum)
3953         se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
3954     else
3955         se->avg.load_sum = 1;
3956 
3957     enqueue_load_avg(cfs_rq, se);
3958     cfs_rq->avg.util_avg += se->avg.util_avg;
3959     cfs_rq->avg.util_sum += se->avg.util_sum;
3960     cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
3961     cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
3962 
3963     add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
3964 
3965     cfs_rq_util_change(cfs_rq, 0);
3966 
3967     trace_pelt_cfs_tp(cfs_rq);
3968 }
3969 
3970 /**
3971  * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3972  * @cfs_rq: cfs_rq to detach from
3973  * @se: sched_entity to detach
3974  *
3975  * Must call update_cfs_rq_load_avg() before this, since we rely on
3976  * cfs_rq->avg.last_update_time being current.
3977  */
3978 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3979 {
3980     dequeue_load_avg(cfs_rq, se);
3981     sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3982     sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3983     /* See update_cfs_rq_load_avg() */
3984     cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
3985                       cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
3986 
3987     sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
3988     sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
3989     /* See update_cfs_rq_load_avg() */
3990     cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
3991                           cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
3992 
3993     add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
3994 
3995     cfs_rq_util_change(cfs_rq, 0);
3996 
3997     trace_pelt_cfs_tp(cfs_rq);
3998 }
3999 
4000 /*
4001  * Optional action to be done while updating the load average
4002  */
4003 #define UPDATE_TG   0x1
4004 #define SKIP_AGE_LOAD   0x2
4005 #define DO_ATTACH   0x4
4006 
4007 /* Update task and its cfs_rq load average */
4008 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4009 {
4010     u64 now = cfs_rq_clock_pelt(cfs_rq);
4011     int decayed;
4012 
4013     /*
4014      * Track task load average for carrying it to new CPU after migrated, and
4015      * track group sched_entity load average for task_h_load calc in migration
4016      */
4017     if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
4018         __update_load_avg_se(now, cfs_rq, se);
4019 
4020     decayed  = update_cfs_rq_load_avg(now, cfs_rq);
4021     decayed |= propagate_entity_load_avg(se);
4022 
4023     if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
4024 
4025         /*
4026          * DO_ATTACH means we're here from enqueue_entity().
4027          * !last_update_time means we've passed through
4028          * migrate_task_rq_fair() indicating we migrated.
4029          *
4030          * IOW we're enqueueing a task on a new CPU.
4031          */
4032         attach_entity_load_avg(cfs_rq, se);
4033         update_tg_load_avg(cfs_rq);
4034 
4035     } else if (decayed) {
4036         cfs_rq_util_change(cfs_rq, 0);
4037 
4038         if (flags & UPDATE_TG)
4039             update_tg_load_avg(cfs_rq);
4040     }
4041 }
4042 
4043 /*
4044  * Synchronize entity load avg of dequeued entity without locking
4045  * the previous rq.
4046  */
4047 static void sync_entity_load_avg(struct sched_entity *se)
4048 {
4049     struct cfs_rq *cfs_rq = cfs_rq_of(se);
4050     u64 last_update_time;
4051 
4052     last_update_time = cfs_rq_last_update_time(cfs_rq);
4053     __update_load_avg_blocked_se(last_update_time, se);
4054 }
4055 
4056 /*
4057  * Task first catches up with cfs_rq, and then subtract
4058  * itself from the cfs_rq (task must be off the queue now).
4059  */
4060 static void remove_entity_load_avg(struct sched_entity *se)
4061 {
4062     struct cfs_rq *cfs_rq = cfs_rq_of(se);
4063     unsigned long flags;
4064 
4065     /*
4066      * tasks cannot exit without having gone through wake_up_new_task() ->
4067      * post_init_entity_util_avg() which will have added things to the
4068      * cfs_rq, so we can remove unconditionally.
4069      */
4070 
4071     sync_entity_load_avg(se);
4072 
4073     raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
4074     ++cfs_rq->removed.nr;
4075     cfs_rq->removed.util_avg    += se->avg.util_avg;
4076     cfs_rq->removed.load_avg    += se->avg.load_avg;
4077     cfs_rq->removed.runnable_avg    += se->avg.runnable_avg;
4078     raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
4079 }
4080 
4081 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
4082 {
4083     return cfs_rq->avg.runnable_avg;
4084 }
4085 
4086 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
4087 {
4088     return cfs_rq->avg.load_avg;
4089 }
4090 
4091 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
4092 
4093 static inline unsigned long task_util(struct task_struct *p)
4094 {
4095     return READ_ONCE(p->se.avg.util_avg);
4096 }
4097 
4098 static inline unsigned long _task_util_est(struct task_struct *p)
4099 {
4100     struct util_est ue = READ_ONCE(p->se.avg.util_est);
4101 
4102     return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
4103 }
4104 
4105 static inline unsigned long task_util_est(struct task_struct *p)
4106 {
4107     return max(task_util(p), _task_util_est(p));
4108 }
4109 
4110 #ifdef CONFIG_UCLAMP_TASK
4111 static inline unsigned long uclamp_task_util(struct task_struct *p)
4112 {
4113     return clamp(task_util_est(p),
4114              uclamp_eff_value(p, UCLAMP_MIN),
4115              uclamp_eff_value(p, UCLAMP_MAX));
4116 }
4117 #else
4118 static inline unsigned long uclamp_task_util(struct task_struct *p)
4119 {
4120     return task_util_est(p);
4121 }
4122 #endif
4123 
4124 static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
4125                     struct task_struct *p)
4126 {
4127     unsigned int enqueued;
4128 
4129     if (!sched_feat(UTIL_EST))
4130         return;
4131 
4132     /* Update root cfs_rq's estimated utilization */
4133     enqueued  = cfs_rq->avg.util_est.enqueued;
4134     enqueued += _task_util_est(p);
4135     WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4136 
4137     trace_sched_util_est_cfs_tp(cfs_rq);
4138 }
4139 
4140 static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
4141                     struct task_struct *p)
4142 {
4143     unsigned int enqueued;
4144 
4145     if (!sched_feat(UTIL_EST))
4146         return;
4147 
4148     /* Update root cfs_rq's estimated utilization */
4149     enqueued  = cfs_rq->avg.util_est.enqueued;
4150     enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
4151     WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4152 
4153     trace_sched_util_est_cfs_tp(cfs_rq);
4154 }
4155 
4156 #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
4157 
4158 /*
4159  * Check if a (signed) value is within a specified (unsigned) margin,
4160  * based on the observation that:
4161  *
4162  *     abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
4163  *
4164  * NOTE: this only works when value + margin < INT_MAX.
4165  */
4166 static inline bool within_margin(int value, int margin)
4167 {
4168     return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
4169 }
4170 
4171 static inline void util_est_update(struct cfs_rq *cfs_rq,
4172                    struct task_struct *p,
4173                    bool task_sleep)
4174 {
4175     long last_ewma_diff, last_enqueued_diff;
4176     struct util_est ue;
4177 
4178     if (!sched_feat(UTIL_EST))
4179         return;
4180 
4181     /*
4182      * Skip update of task's estimated utilization when the task has not
4183      * yet completed an activation, e.g. being migrated.
4184      */
4185     if (!task_sleep)
4186         return;
4187 
4188     /*
4189      * If the PELT values haven't changed since enqueue time,
4190      * skip the util_est update.
4191      */
4192     ue = p->se.avg.util_est;
4193     if (ue.enqueued & UTIL_AVG_UNCHANGED)
4194         return;
4195 
4196     last_enqueued_diff = ue.enqueued;
4197 
4198     /*
4199      * Reset EWMA on utilization increases, the moving average is used only
4200      * to smooth utilization decreases.
4201      */
4202     ue.enqueued = task_util(p);
4203     if (sched_feat(UTIL_EST_FASTUP)) {
4204         if (ue.ewma < ue.enqueued) {
4205             ue.ewma = ue.enqueued;
4206             goto done;
4207         }
4208     }
4209 
4210     /*
4211      * Skip update of task's estimated utilization when its members are
4212      * already ~1% close to its last activation value.
4213      */
4214     last_ewma_diff = ue.enqueued - ue.ewma;
4215     last_enqueued_diff -= ue.enqueued;
4216     if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
4217         if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
4218             goto done;
4219 
4220         return;
4221     }
4222 
4223     /*
4224      * To avoid overestimation of actual task utilization, skip updates if
4225      * we cannot grant there is idle time in this CPU.
4226      */
4227     if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
4228         return;
4229 
4230     /*
4231      * Update Task's estimated utilization
4232      *
4233      * When *p completes an activation we can consolidate another sample
4234      * of the task size. This is done by storing the current PELT value
4235      * as ue.enqueued and by using this value to update the Exponential
4236      * Weighted Moving Average (EWMA):
4237      *
4238      *  ewma(t) = w *  task_util(p) + (1-w) * ewma(t-1)
4239      *          = w *  task_util(p) +         ewma(t-1)  - w * ewma(t-1)
4240      *          = w * (task_util(p) -         ewma(t-1)) +     ewma(t-1)
4241      *          = w * (      last_ewma_diff            ) +     ewma(t-1)
4242      *          = w * (last_ewma_diff  +  ewma(t-1) / w)
4243      *
4244      * Where 'w' is the weight of new samples, which is configured to be
4245      * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
4246      */
4247     ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
4248     ue.ewma  += last_ewma_diff;
4249     ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
4250 done:
4251     ue.enqueued |= UTIL_AVG_UNCHANGED;
4252     WRITE_ONCE(p->se.avg.util_est, ue);
4253 
4254     trace_sched_util_est_se_tp(&p->se);
4255 }
4256 
4257 static inline int task_fits_capacity(struct task_struct *p,
4258                      unsigned long capacity)
4259 {
4260     return fits_capacity(uclamp_task_util(p), capacity);
4261 }
4262 
4263 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
4264 {
4265     if (!static_branch_unlikely(&sched_asym_cpucapacity))
4266         return;
4267 
4268     if (!p || p->nr_cpus_allowed == 1) {
4269         rq->misfit_task_load = 0;
4270         return;
4271     }
4272 
4273     if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
4274         rq->misfit_task_load = 0;
4275         return;
4276     }
4277 
4278     /*
4279      * Make sure that misfit_task_load will not be null even if
4280      * task_h_load() returns 0.
4281      */
4282     rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
4283 }
4284 
4285 #else /* CONFIG_SMP */
4286 
4287 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
4288 {
4289     return true;
4290 }
4291 
4292 #define UPDATE_TG   0x0
4293 #define SKIP_AGE_LOAD   0x0
4294 #define DO_ATTACH   0x0
4295 
4296 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
4297 {
4298     cfs_rq_util_change(cfs_rq, 0);
4299 }
4300 
4301 static inline void remove_entity_load_avg(struct sched_entity *se) {}
4302 
4303 static inline void
4304 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
4305 static inline void
4306 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
4307 
4308 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
4309 {
4310     return 0;
4311 }
4312 
4313 static inline void
4314 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
4315 
4316 static inline void
4317 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
4318 
4319 static inline void
4320 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
4321         bool task_sleep) {}
4322 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
4323 
4324 #endif /* CONFIG_SMP */
4325 
4326 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
4327 {
4328 #ifdef CONFIG_SCHED_DEBUG
4329     s64 d = se->vruntime - cfs_rq->min_vruntime;
4330 
4331     if (d < 0)
4332         d = -d;
4333 
4334     if (d > 3*sysctl_sched_latency)
4335         schedstat_inc(cfs_rq->nr_spread_over);
4336 #endif
4337 }
4338 
4339 static void
4340 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
4341 {
4342     u64 vruntime = cfs_rq->min_vruntime;
4343 
4344     /*
4345      * The 'current' period is already promised to the current tasks,
4346      * however the extra weight of the new task will slow them down a
4347      * little, place the new task so that it fits in the slot that
4348      * stays open at the end.
4349      */
4350     if (initial && sched_feat(START_DEBIT))
4351         vruntime += sched_vslice(cfs_rq, se);
4352 
4353     /* sleeps up to a single latency don't count. */
4354     if (!initial) {
4355         unsigned long thresh;
4356 
4357         if (se_is_idle(se))
4358             thresh = sysctl_sched_min_granularity;
4359         else
4360             thresh = sysctl_sched_latency;
4361 
4362         /*
4363          * Halve their sleep time's effect, to allow
4364          * for a gentler effect of sleepers:
4365          */
4366         if (sched_feat(GENTLE_FAIR_SLEEPERS))
4367             thresh >>= 1;
4368 
4369         vruntime -= thresh;
4370     }
4371 
4372     /* ensure we never gain time by being placed backwards. */
4373     se->vruntime = max_vruntime(se->vruntime, vruntime);
4374 }
4375 
4376 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
4377 
4378 static inline bool cfs_bandwidth_used(void);
4379 
4380 /*
4381  * MIGRATION
4382  *
4383  *  dequeue
4384  *    update_curr()
4385  *      update_min_vruntime()
4386  *    vruntime -= min_vruntime
4387  *
4388  *  enqueue
4389  *    update_curr()
4390  *      update_min_vruntime()
4391  *    vruntime += min_vruntime
4392  *
4393  * this way the vruntime transition between RQs is done when both
4394  * min_vruntime are up-to-date.
4395  *
4396  * WAKEUP (remote)
4397  *
4398  *  ->migrate_task_rq_fair() (p->state == TASK_WAKING)
4399  *    vruntime -= min_vruntime
4400  *
4401  *  enqueue
4402  *    update_curr()
4403  *      update_min_vruntime()
4404  *    vruntime += min_vruntime
4405  *
4406  * this way we don't have the most up-to-date min_vruntime on the originating
4407  * CPU and an up-to-date min_vruntime on the destination CPU.
4408  */
4409 
4410 static void
4411 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4412 {
4413     bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
4414     bool curr = cfs_rq->curr == se;
4415 
4416     /*
4417      * If we're the current task, we must renormalise before calling
4418      * update_curr().
4419      */
4420     if (renorm && curr)
4421         se->vruntime += cfs_rq->min_vruntime;
4422 
4423     update_curr(cfs_rq);
4424 
4425     /*
4426      * Otherwise, renormalise after, such that we're placed at the current
4427      * moment in time, instead of some random moment in the past. Being
4428      * placed in the past could significantly boost this task to the
4429      * fairness detriment of existing tasks.
4430      */
4431     if (renorm && !curr)
4432         se->vruntime += cfs_rq->min_vruntime;
4433 
4434     /*
4435      * When enqueuing a sched_entity, we must:
4436      *   - Update loads to have both entity and cfs_rq synced with now.
4437      *   - Add its load to cfs_rq->runnable_avg
4438      *   - For group_entity, update its weight to reflect the new share of
4439      *     its group cfs_rq
4440      *   - Add its new weight to cfs_rq->load.weight
4441      */
4442     update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
4443     se_update_runnable(se);
4444     update_cfs_group(se);
4445     account_entity_enqueue(cfs_rq, se);
4446 
4447     if (flags & ENQUEUE_WAKEUP)
4448         place_entity(cfs_rq, se, 0);
4449 
4450     check_schedstat_required();
4451     update_stats_enqueue_fair(cfs_rq, se, flags);
4452     check_spread(cfs_rq, se);
4453     if (!curr)
4454         __enqueue_entity(cfs_rq, se);
4455     se->on_rq = 1;
4456 
4457     if (cfs_rq->nr_running == 1) {
4458         check_enqueue_throttle(cfs_rq);
4459         if (!throttled_hierarchy(cfs_rq))
4460             list_add_leaf_cfs_rq(cfs_rq);
4461     }
4462 }
4463 
4464 static void __clear_buddies_last(struct sched_entity *se)
4465 {
4466     for_each_sched_entity(se) {
4467         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4468         if (cfs_rq->last != se)
4469             break;
4470 
4471         cfs_rq->last = NULL;
4472     }
4473 }
4474 
4475 static void __clear_buddies_next(struct sched_entity *se)
4476 {
4477     for_each_sched_entity(se) {
4478         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4479         if (cfs_rq->next != se)
4480             break;
4481 
4482         cfs_rq->next = NULL;
4483     }
4484 }
4485 
4486 static void __clear_buddies_skip(struct sched_entity *se)
4487 {
4488     for_each_sched_entity(se) {
4489         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4490         if (cfs_rq->skip != se)
4491             break;
4492 
4493         cfs_rq->skip = NULL;
4494     }
4495 }
4496 
4497 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
4498 {
4499     if (cfs_rq->last == se)
4500         __clear_buddies_last(se);
4501 
4502     if (cfs_rq->next == se)
4503         __clear_buddies_next(se);
4504 
4505     if (cfs_rq->skip == se)
4506         __clear_buddies_skip(se);
4507 }
4508 
4509 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4510 
4511 static void
4512 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4513 {
4514     /*
4515      * Update run-time statistics of the 'current'.
4516      */
4517     update_curr(cfs_rq);
4518 
4519     /*
4520      * When dequeuing a sched_entity, we must:
4521      *   - Update loads to have both entity and cfs_rq synced with now.
4522      *   - Subtract its load from the cfs_rq->runnable_avg.
4523      *   - Subtract its previous weight from cfs_rq->load.weight.
4524      *   - For group entity, update its weight to reflect the new share
4525      *     of its group cfs_rq.
4526      */
4527     update_load_avg(cfs_rq, se, UPDATE_TG);
4528     se_update_runnable(se);
4529 
4530     update_stats_dequeue_fair(cfs_rq, se, flags);
4531 
4532     clear_buddies(cfs_rq, se);
4533 
4534     if (se != cfs_rq->curr)
4535         __dequeue_entity(cfs_rq, se);
4536     se->on_rq = 0;
4537     account_entity_dequeue(cfs_rq, se);
4538 
4539     /*
4540      * Normalize after update_curr(); which will also have moved
4541      * min_vruntime if @se is the one holding it back. But before doing
4542      * update_min_vruntime() again, which will discount @se's position and
4543      * can move min_vruntime forward still more.
4544      */
4545     if (!(flags & DEQUEUE_SLEEP))
4546         se->vruntime -= cfs_rq->min_vruntime;
4547 
4548     /* return excess runtime on last dequeue */
4549     return_cfs_rq_runtime(cfs_rq);
4550 
4551     update_cfs_group(se);
4552 
4553     /*
4554      * Now advance min_vruntime if @se was the entity holding it back,
4555      * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
4556      * put back on, and if we advance min_vruntime, we'll be placed back
4557      * further than we started -- ie. we'll be penalized.
4558      */
4559     if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
4560         update_min_vruntime(cfs_rq);
4561 
4562     if (cfs_rq->nr_running == 0)
4563         update_idle_cfs_rq_clock_pelt(cfs_rq);
4564 }
4565 
4566 /*
4567  * Preempt the current task with a newly woken task if needed:
4568  */
4569 static void
4570 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4571 {
4572     unsigned long ideal_runtime, delta_exec;
4573     struct sched_entity *se;
4574     s64 delta;
4575 
4576     ideal_runtime = sched_slice(cfs_rq, curr);
4577     delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
4578     if (delta_exec > ideal_runtime) {
4579         resched_curr(rq_of(cfs_rq));
4580         /*
4581          * The current task ran long enough, ensure it doesn't get
4582          * re-elected due to buddy favours.
4583          */
4584         clear_buddies(cfs_rq, curr);
4585         return;
4586     }
4587 
4588     /*
4589      * Ensure that a task that missed wakeup preemption by a
4590      * narrow margin doesn't have to wait for a full slice.
4591      * This also mitigates buddy induced latencies under load.
4592      */
4593     if (delta_exec < sysctl_sched_min_granularity)
4594         return;
4595 
4596     se = __pick_first_entity(cfs_rq);
4597     delta = curr->vruntime - se->vruntime;
4598 
4599     if (delta < 0)
4600         return;
4601 
4602     if (delta > ideal_runtime)
4603         resched_curr(rq_of(cfs_rq));
4604 }
4605 
4606 static void
4607 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
4608 {
4609     clear_buddies(cfs_rq, se);
4610 
4611     /* 'current' is not kept within the tree. */
4612     if (se->on_rq) {
4613         /*
4614          * Any task has to be enqueued before it get to execute on
4615          * a CPU. So account for the time it spent waiting on the
4616          * runqueue.
4617          */
4618         update_stats_wait_end_fair(cfs_rq, se);
4619         __dequeue_entity(cfs_rq, se);
4620         update_load_avg(cfs_rq, se, UPDATE_TG);
4621     }
4622 
4623     update_stats_curr_start(cfs_rq, se);
4624     cfs_rq->curr = se;
4625 
4626     /*
4627      * Track our maximum slice length, if the CPU's load is at
4628      * least twice that of our own weight (i.e. dont track it
4629      * when there are only lesser-weight tasks around):
4630      */
4631     if (schedstat_enabled() &&
4632         rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
4633         struct sched_statistics *stats;
4634 
4635         stats = __schedstats_from_se(se);
4636         __schedstat_set(stats->slice_max,
4637                 max((u64)stats->slice_max,
4638                     se->sum_exec_runtime - se->prev_sum_exec_runtime));
4639     }
4640 
4641     se->prev_sum_exec_runtime = se->sum_exec_runtime;
4642 }
4643 
4644 static int
4645 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4646 
4647 /*
4648  * Pick the next process, keeping these things in mind, in this order:
4649  * 1) keep things fair between processes/task groups
4650  * 2) pick the "next" process, since someone really wants that to run
4651  * 3) pick the "last" process, for cache locality
4652  * 4) do not run the "skip" process, if something else is available
4653  */
4654 static struct sched_entity *
4655 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4656 {
4657     struct sched_entity *left = __pick_first_entity(cfs_rq);
4658     struct sched_entity *se;
4659 
4660     /*
4661      * If curr is set we have to see if its left of the leftmost entity
4662      * still in the tree, provided there was anything in the tree at all.
4663      */
4664     if (!left || (curr && entity_before(curr, left)))
4665         left = curr;
4666 
4667     se = left; /* ideally we run the leftmost entity */
4668 
4669     /*
4670      * Avoid running the skip buddy, if running something else can
4671      * be done without getting too unfair.
4672      */
4673     if (cfs_rq->skip && cfs_rq->skip == se) {
4674         struct sched_entity *second;
4675 
4676         if (se == curr) {
4677             second = __pick_first_entity(cfs_rq);
4678         } else {
4679             second = __pick_next_entity(se);
4680             if (!second || (curr && entity_before(curr, second)))
4681                 second = curr;
4682         }
4683 
4684         if (second && wakeup_preempt_entity(second, left) < 1)
4685             se = second;
4686     }
4687 
4688     if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) {
4689         /*
4690          * Someone really wants this to run. If it's not unfair, run it.
4691          */
4692         se = cfs_rq->next;
4693     } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) {
4694         /*
4695          * Prefer last buddy, try to return the CPU to a preempted task.
4696          */
4697         se = cfs_rq->last;
4698     }
4699 
4700     return se;
4701 }
4702 
4703 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4704 
4705 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
4706 {
4707     /*
4708      * If still on the runqueue then deactivate_task()
4709      * was not called and update_curr() has to be done:
4710      */
4711     if (prev->on_rq)
4712         update_curr(cfs_rq);
4713 
4714     /* throttle cfs_rqs exceeding runtime */
4715     check_cfs_rq_runtime(cfs_rq);
4716 
4717     check_spread(cfs_rq, prev);
4718 
4719     if (prev->on_rq) {
4720         update_stats_wait_start_fair(cfs_rq, prev);
4721         /* Put 'current' back into the tree. */
4722         __enqueue_entity(cfs_rq, prev);
4723         /* in !on_rq case, update occurred at dequeue */
4724         update_load_avg(cfs_rq, prev, 0);
4725     }
4726     cfs_rq->curr = NULL;
4727 }
4728 
4729 static void
4730 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
4731 {
4732     /*
4733      * Update run-time statistics of the 'current'.
4734      */
4735     update_curr(cfs_rq);
4736 
4737     /*
4738      * Ensure that runnable average is periodically updated.
4739      */
4740     update_load_avg(cfs_rq, curr, UPDATE_TG);
4741     update_cfs_group(curr);
4742 
4743 #ifdef CONFIG_SCHED_HRTICK
4744     /*
4745      * queued ticks are scheduled to match the slice, so don't bother
4746      * validating it and just reschedule.
4747      */
4748     if (queued) {
4749         resched_curr(rq_of(cfs_rq));
4750         return;
4751     }
4752     /*
4753      * don't let the period tick interfere with the hrtick preemption
4754      */
4755     if (!sched_feat(DOUBLE_TICK) &&
4756             hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
4757         return;
4758 #endif
4759 
4760     if (cfs_rq->nr_running > 1)
4761         check_preempt_tick(cfs_rq, curr);
4762 }
4763 
4764 
4765 /**************************************************
4766  * CFS bandwidth control machinery
4767  */
4768 
4769 #ifdef CONFIG_CFS_BANDWIDTH
4770 
4771 #ifdef CONFIG_JUMP_LABEL
4772 static struct static_key __cfs_bandwidth_used;
4773 
4774 static inline bool cfs_bandwidth_used(void)
4775 {
4776     return static_key_false(&__cfs_bandwidth_used);
4777 }
4778 
4779 void cfs_bandwidth_usage_inc(void)
4780 {
4781     static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
4782 }
4783 
4784 void cfs_bandwidth_usage_dec(void)
4785 {
4786     static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
4787 }
4788 #else /* CONFIG_JUMP_LABEL */
4789 static bool cfs_bandwidth_used(void)
4790 {
4791     return true;
4792 }
4793 
4794 void cfs_bandwidth_usage_inc(void) {}
4795 void cfs_bandwidth_usage_dec(void) {}
4796 #endif /* CONFIG_JUMP_LABEL */
4797 
4798 /*
4799  * default period for cfs group bandwidth.
4800  * default: 0.1s, units: nanoseconds
4801  */
4802 static inline u64 default_cfs_period(void)
4803 {
4804     return 100000000ULL;
4805 }
4806 
4807 static inline u64 sched_cfs_bandwidth_slice(void)
4808 {
4809     return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
4810 }
4811 
4812 /*
4813  * Replenish runtime according to assigned quota. We use sched_clock_cpu
4814  * directly instead of rq->clock to avoid adding additional synchronization
4815  * around rq->lock.
4816  *
4817  * requires cfs_b->lock
4818  */
4819 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
4820 {
4821     s64 runtime;
4822 
4823     if (unlikely(cfs_b->quota == RUNTIME_INF))
4824         return;
4825 
4826     cfs_b->runtime += cfs_b->quota;
4827     runtime = cfs_b->runtime_snap - cfs_b->runtime;
4828     if (runtime > 0) {
4829         cfs_b->burst_time += runtime;
4830         cfs_b->nr_burst++;
4831     }
4832 
4833     cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
4834     cfs_b->runtime_snap = cfs_b->runtime;
4835 }
4836 
4837 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4838 {
4839     return &tg->cfs_bandwidth;
4840 }
4841 
4842 /* returns 0 on failure to allocate runtime */
4843 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
4844                    struct cfs_rq *cfs_rq, u64 target_runtime)
4845 {
4846     u64 min_amount, amount = 0;
4847 
4848     lockdep_assert_held(&cfs_b->lock);
4849 
4850     /* note: this is a positive sum as runtime_remaining <= 0 */
4851     min_amount = target_runtime - cfs_rq->runtime_remaining;
4852 
4853     if (cfs_b->quota == RUNTIME_INF)
4854         amount = min_amount;
4855     else {
4856         start_cfs_bandwidth(cfs_b);
4857 
4858         if (cfs_b->runtime > 0) {
4859             amount = min(cfs_b->runtime, min_amount);
4860             cfs_b->runtime -= amount;
4861             cfs_b->idle = 0;
4862         }
4863     }
4864 
4865     cfs_rq->runtime_remaining += amount;
4866 
4867     return cfs_rq->runtime_remaining > 0;
4868 }
4869 
4870 /* returns 0 on failure to allocate runtime */
4871 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4872 {
4873     struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4874     int ret;
4875 
4876     raw_spin_lock(&cfs_b->lock);
4877     ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
4878     raw_spin_unlock(&cfs_b->lock);
4879 
4880     return ret;
4881 }
4882 
4883 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4884 {
4885     /* dock delta_exec before expiring quota (as it could span periods) */
4886     cfs_rq->runtime_remaining -= delta_exec;
4887 
4888     if (likely(cfs_rq->runtime_remaining > 0))
4889         return;
4890 
4891     if (cfs_rq->throttled)
4892         return;
4893     /*
4894      * if we're unable to extend our runtime we resched so that the active
4895      * hierarchy can be throttled
4896      */
4897     if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
4898         resched_curr(rq_of(cfs_rq));
4899 }
4900 
4901 static __always_inline
4902 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4903 {
4904     if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
4905         return;
4906 
4907     __account_cfs_rq_runtime(cfs_rq, delta_exec);
4908 }
4909 
4910 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4911 {
4912     return cfs_bandwidth_used() && cfs_rq->throttled;
4913 }
4914 
4915 /* check whether cfs_rq, or any parent, is throttled */
4916 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4917 {
4918     return cfs_bandwidth_used() && cfs_rq->throttle_count;
4919 }
4920 
4921 /*
4922  * Ensure that neither of the group entities corresponding to src_cpu or
4923  * dest_cpu are members of a throttled hierarchy when performing group
4924  * load-balance operations.
4925  */
4926 static inline int throttled_lb_pair(struct task_group *tg,
4927                     int src_cpu, int dest_cpu)
4928 {
4929     struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
4930 
4931     src_cfs_rq = tg->cfs_rq[src_cpu];
4932     dest_cfs_rq = tg->cfs_rq[dest_cpu];
4933 
4934     return throttled_hierarchy(src_cfs_rq) ||
4935            throttled_hierarchy(dest_cfs_rq);
4936 }
4937 
4938 static int tg_unthrottle_up(struct task_group *tg, void *data)
4939 {
4940     struct rq *rq = data;
4941     struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4942 
4943     cfs_rq->throttle_count--;
4944     if (!cfs_rq->throttle_count) {
4945         cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
4946                          cfs_rq->throttled_clock_pelt;
4947 
4948         /* Add cfs_rq with load or one or more already running entities to the list */
4949         if (!cfs_rq_is_decayed(cfs_rq))
4950             list_add_leaf_cfs_rq(cfs_rq);
4951     }
4952 
4953     return 0;
4954 }
4955 
4956 static int tg_throttle_down(struct task_group *tg, void *data)
4957 {
4958     struct rq *rq = data;
4959     struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4960 
4961     /* group is entering throttled state, stop time */
4962     if (!cfs_rq->throttle_count) {
4963         cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
4964         list_del_leaf_cfs_rq(cfs_rq);
4965     }
4966     cfs_rq->throttle_count++;
4967 
4968     return 0;
4969 }
4970 
4971 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
4972 {
4973     struct rq *rq = rq_of(cfs_rq);
4974     struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4975     struct sched_entity *se;
4976     long task_delta, idle_task_delta, dequeue = 1;
4977 
4978     raw_spin_lock(&cfs_b->lock);
4979     /* This will start the period timer if necessary */
4980     if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
4981         /*
4982          * We have raced with bandwidth becoming available, and if we
4983          * actually throttled the timer might not unthrottle us for an
4984          * entire period. We additionally needed to make sure that any
4985          * subsequent check_cfs_rq_runtime calls agree not to throttle
4986          * us, as we may commit to do cfs put_prev+pick_next, so we ask
4987          * for 1ns of runtime rather than just check cfs_b.
4988          */
4989         dequeue = 0;
4990     } else {
4991         list_add_tail_rcu(&cfs_rq->throttled_list,
4992                   &cfs_b->throttled_cfs_rq);
4993     }
4994     raw_spin_unlock(&cfs_b->lock);
4995 
4996     if (!dequeue)
4997         return false;  /* Throttle no longer required. */
4998 
4999     se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5000 
5001     /* freeze hierarchy runnable averages while throttled */
5002     rcu_read_lock();
5003     walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5004     rcu_read_unlock();
5005 
5006     task_delta = cfs_rq->h_nr_running;
5007     idle_task_delta = cfs_rq->idle_h_nr_running;
5008     for_each_sched_entity(se) {
5009         struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5010         /* throttled entity or throttle-on-deactivate */
5011         if (!se->on_rq)
5012             goto done;
5013 
5014         dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
5015 
5016         if (cfs_rq_is_idle(group_cfs_rq(se)))
5017             idle_task_delta = cfs_rq->h_nr_running;
5018 
5019         qcfs_rq->h_nr_running -= task_delta;
5020         qcfs_rq->idle_h_nr_running -= idle_task_delta;
5021 
5022         if (qcfs_rq->load.weight) {
5023             /* Avoid re-evaluating load for this entity: */
5024             se = parent_entity(se);
5025             break;
5026         }
5027     }
5028 
5029     for_each_sched_entity(se) {
5030         struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5031         /* throttled entity or throttle-on-deactivate */
5032         if (!se->on_rq)
5033             goto done;
5034 
5035         update_load_avg(qcfs_rq, se, 0);
5036         se_update_runnable(se);
5037 
5038         if (cfs_rq_is_idle(group_cfs_rq(se)))
5039             idle_task_delta = cfs_rq->h_nr_running;
5040 
5041         qcfs_rq->h_nr_running -= task_delta;
5042         qcfs_rq->idle_h_nr_running -= idle_task_delta;
5043     }
5044 
5045     /* At this point se is NULL and we are at root level*/
5046     sub_nr_running(rq, task_delta);
5047 
5048 done:
5049     /*
5050      * Note: distribution will already see us throttled via the
5051      * throttled-list.  rq->lock protects completion.
5052      */
5053     cfs_rq->throttled = 1;
5054     cfs_rq->throttled_clock = rq_clock(rq);
5055     return true;
5056 }
5057 
5058 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
5059 {
5060     struct rq *rq = rq_of(cfs_rq);
5061     struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5062     struct sched_entity *se;
5063     long task_delta, idle_task_delta;
5064 
5065     se = cfs_rq->tg->se[cpu_of(rq)];
5066 
5067     cfs_rq->throttled = 0;
5068 
5069     update_rq_clock(rq);
5070 
5071     raw_spin_lock(&cfs_b->lock);
5072     cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5073     list_del_rcu(&cfs_rq->throttled_list);
5074     raw_spin_unlock(&cfs_b->lock);
5075 
5076     /* update hierarchical throttle state */
5077     walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5078 
5079     if (!cfs_rq->load.weight) {
5080         if (!cfs_rq->on_list)
5081             return;
5082         /*
5083          * Nothing to run but something to decay (on_list)?
5084          * Complete the branch.
5085          */
5086         for_each_sched_entity(se) {
5087             if (list_add_leaf_cfs_rq(cfs_rq_of(se)))
5088                 break;
5089         }
5090         goto unthrottle_throttle;
5091     }
5092 
5093     task_delta = cfs_rq->h_nr_running;
5094     idle_task_delta = cfs_rq->idle_h_nr_running;
5095     for_each_sched_entity(se) {
5096         struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5097 
5098         if (se->on_rq)
5099             break;
5100         enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
5101 
5102         if (cfs_rq_is_idle(group_cfs_rq(se)))
5103             idle_task_delta = cfs_rq->h_nr_running;
5104 
5105         qcfs_rq->h_nr_running += task_delta;
5106         qcfs_rq->idle_h_nr_running += idle_task_delta;
5107 
5108         /* end evaluation on encountering a throttled cfs_rq */
5109         if (cfs_rq_throttled(qcfs_rq))
5110             goto unthrottle_throttle;
5111     }
5112 
5113     for_each_sched_entity(se) {
5114         struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5115 
5116         update_load_avg(qcfs_rq, se, UPDATE_TG);
5117         se_update_runnable(se);
5118 
5119         if (cfs_rq_is_idle(group_cfs_rq(se)))
5120             idle_task_delta = cfs_rq->h_nr_running;
5121 
5122         qcfs_rq->h_nr_running += task_delta;
5123         qcfs_rq->idle_h_nr_running += idle_task_delta;
5124 
5125         /* end evaluation on encountering a throttled cfs_rq */
5126         if (cfs_rq_throttled(qcfs_rq))
5127             goto unthrottle_throttle;
5128     }
5129 
5130     /* At this point se is NULL and we are at root level*/
5131     add_nr_running(rq, task_delta);
5132 
5133 unthrottle_throttle:
5134     assert_list_leaf_cfs_rq(rq);
5135 
5136     /* Determine whether we need to wake up potentially idle CPU: */
5137     if (rq->curr == rq->idle && rq->cfs.nr_running)
5138         resched_curr(rq);
5139 }
5140 
5141 static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
5142 {
5143     struct cfs_rq *cfs_rq;
5144     u64 runtime, remaining = 1;
5145 
5146     rcu_read_lock();
5147     list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
5148                 throttled_list) {
5149         struct rq *rq = rq_of(cfs_rq);
5150         struct rq_flags rf;
5151 
5152         rq_lock_irqsave(rq, &rf);
5153         if (!cfs_rq_throttled(cfs_rq))
5154             goto next;
5155 
5156         /* By the above check, this should never be true */
5157         SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
5158 
5159         raw_spin_lock(&cfs_b->lock);
5160         runtime = -cfs_rq->runtime_remaining + 1;
5161         if (runtime > cfs_b->runtime)
5162             runtime = cfs_b->runtime;
5163         cfs_b->runtime -= runtime;
5164         remaining = cfs_b->runtime;
5165         raw_spin_unlock(&cfs_b->lock);
5166 
5167         cfs_rq->runtime_remaining += runtime;
5168 
5169         /* we check whether we're throttled above */
5170         if (cfs_rq->runtime_remaining > 0)
5171             unthrottle_cfs_rq(cfs_rq);
5172 
5173 next:
5174         rq_unlock_irqrestore(rq, &rf);
5175 
5176         if (!remaining)
5177             break;
5178     }
5179     rcu_read_unlock();
5180 }
5181 
5182 /*
5183  * Responsible for refilling a task_group's bandwidth and unthrottling its
5184  * cfs_rqs as appropriate. If there has been no activity within the last
5185  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
5186  * used to track this state.
5187  */
5188 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
5189 {
5190     int throttled;
5191 
5192     /* no need to continue the timer with no bandwidth constraint */
5193     if (cfs_b->quota == RUNTIME_INF)
5194         goto out_deactivate;
5195 
5196     throttled = !list_empty(&cfs_b->throttled_cfs_rq);
5197     cfs_b->nr_periods += overrun;
5198 
5199     /* Refill extra burst quota even if cfs_b->idle */
5200     __refill_cfs_bandwidth_runtime(cfs_b);
5201 
5202     /*
5203      * idle depends on !throttled (for the case of a large deficit), and if
5204      * we're going inactive then everything else can be deferred
5205      */
5206     if (cfs_b->idle && !throttled)
5207         goto out_deactivate;
5208 
5209     if (!throttled) {
5210         /* mark as potentially idle for the upcoming period */
5211         cfs_b->idle = 1;
5212         return 0;
5213     }
5214 
5215     /* account preceding periods in which throttling occurred */
5216     cfs_b->nr_throttled += overrun;
5217 
5218     /*
5219      * This check is repeated as we release cfs_b->lock while we unthrottle.
5220      */
5221     while (throttled && cfs_b->runtime > 0) {
5222         raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5223         /* we can't nest cfs_b->lock while distributing bandwidth */
5224         distribute_cfs_runtime(cfs_b);
5225         raw_spin_lock_irqsave(&cfs_b->lock, flags);
5226 
5227         throttled = !list_empty(&cfs_b->throttled_cfs_rq);
5228     }
5229 
5230     /*
5231      * While we are ensured activity in the period following an
5232      * unthrottle, this also covers the case in which the new bandwidth is
5233      * insufficient to cover the existing bandwidth deficit.  (Forcing the
5234      * timer to remain active while there are any throttled entities.)
5235      */
5236     cfs_b->idle = 0;
5237 
5238     return 0;
5239 
5240 out_deactivate:
5241     return 1;
5242 }
5243 
5244 /* a cfs_rq won't donate quota below this amount */
5245 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
5246 /* minimum remaining period time to redistribute slack quota */
5247 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
5248 /* how long we wait to gather additional slack before distributing */
5249 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
5250 
5251 /*
5252  * Are we near the end of the current quota period?
5253  *
5254  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
5255  * hrtimer base being cleared by hrtimer_start. In the case of
5256  * migrate_hrtimers, base is never cleared, so we are fine.
5257  */
5258 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
5259 {
5260     struct hrtimer *refresh_timer = &cfs_b->period_timer;
5261     s64 remaining;
5262 
5263     /* if the call-back is running a quota refresh is already occurring */
5264     if (hrtimer_callback_running(refresh_timer))
5265         return 1;
5266 
5267     /* is a quota refresh about to occur? */
5268     remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
5269     if (remaining < (s64)min_expire)
5270         return 1;
5271 
5272     return 0;
5273 }
5274 
5275 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
5276 {
5277     u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
5278 
5279     /* if there's a quota refresh soon don't bother with slack */
5280     if (runtime_refresh_within(cfs_b, min_left))
5281         return;
5282 
5283     /* don't push forwards an existing deferred unthrottle */
5284     if (cfs_b->slack_started)
5285         return;
5286     cfs_b->slack_started = true;
5287 
5288     hrtimer_start(&cfs_b->slack_timer,
5289             ns_to_ktime(cfs_bandwidth_slack_period),
5290             HRTIMER_MODE_REL);
5291 }
5292 
5293 /* we know any runtime found here is valid as update_curr() precedes return */
5294 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5295 {
5296     struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5297     s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
5298 
5299     if (slack_runtime <= 0)
5300         return;
5301 
5302     raw_spin_lock(&cfs_b->lock);
5303     if (cfs_b->quota != RUNTIME_INF) {
5304         cfs_b->runtime += slack_runtime;
5305 
5306         /* we are under rq->lock, defer unthrottling using a timer */
5307         if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
5308             !list_empty(&cfs_b->throttled_cfs_rq))
5309             start_cfs_slack_bandwidth(cfs_b);
5310     }
5311     raw_spin_unlock(&cfs_b->lock);
5312 
5313     /* even if it's not valid for return we don't want to try again */
5314     cfs_rq->runtime_remaining -= slack_runtime;
5315 }
5316 
5317 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5318 {
5319     if (!cfs_bandwidth_used())
5320         return;
5321 
5322     if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
5323         return;
5324 
5325     __return_cfs_rq_runtime(cfs_rq);
5326 }
5327 
5328 /*
5329  * This is done with a timer (instead of inline with bandwidth return) since
5330  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
5331  */
5332 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
5333 {
5334     u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
5335     unsigned long flags;
5336 
5337     /* confirm we're still not at a refresh boundary */
5338     raw_spin_lock_irqsave(&cfs_b->lock, flags);
5339     cfs_b->slack_started = false;
5340 
5341     if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
5342         raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5343         return;
5344     }
5345 
5346     if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
5347         runtime = cfs_b->runtime;
5348 
5349     raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5350 
5351     if (!runtime)
5352         return;
5353 
5354     distribute_cfs_runtime(cfs_b);
5355 }
5356 
5357 /*
5358  * When a group wakes up we want to make sure that its quota is not already
5359  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
5360  * runtime as update_curr() throttling can not trigger until it's on-rq.
5361  */
5362 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
5363 {
5364     if (!cfs_bandwidth_used())
5365         return;
5366 
5367     /* an active group must be handled by the update_curr()->put() path */
5368     if (!cfs_rq->runtime_enabled || cfs_rq->curr)
5369         return;
5370 
5371     /* ensure the group is not already throttled */
5372     if (cfs_rq_throttled(cfs_rq))
5373         return;
5374 
5375     /* update runtime allocation */
5376     account_cfs_rq_runtime(cfs_rq, 0);
5377     if (cfs_rq->runtime_remaining <= 0)
5378         throttle_cfs_rq(cfs_rq);
5379 }
5380 
5381 static void sync_throttle(struct task_group *tg, int cpu)
5382 {
5383     struct cfs_rq *pcfs_rq, *cfs_rq;
5384 
5385     if (!cfs_bandwidth_used())
5386         return;
5387 
5388     if (!tg->parent)
5389         return;
5390 
5391     cfs_rq = tg->cfs_rq[cpu];
5392     pcfs_rq = tg->parent->cfs_rq[cpu];
5393 
5394     cfs_rq->throttle_count = pcfs_rq->throttle_count;
5395     cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
5396 }
5397 
5398 /* conditionally throttle active cfs_rq's from put_prev_entity() */
5399 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5400 {
5401     if (!cfs_bandwidth_used())
5402         return false;
5403 
5404     if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
5405         return false;
5406 
5407     /*
5408      * it's possible for a throttled entity to be forced into a running
5409      * state (e.g. set_curr_task), in this case we're finished.
5410      */
5411     if (cfs_rq_throttled(cfs_rq))
5412         return true;
5413 
5414     return throttle_cfs_rq(cfs_rq);
5415 }
5416 
5417 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
5418 {
5419     struct cfs_bandwidth *cfs_b =
5420         container_of(timer, struct cfs_bandwidth, slack_timer);
5421 
5422     do_sched_cfs_slack_timer(cfs_b);
5423 
5424     return HRTIMER_NORESTART;
5425 }
5426 
5427 extern const u64 max_cfs_quota_period;
5428 
5429 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
5430 {
5431     struct cfs_bandwidth *cfs_b =
5432         container_of(timer, struct cfs_bandwidth, period_timer);
5433     unsigned long flags;
5434     int overrun;
5435     int idle = 0;
5436     int count = 0;
5437 
5438     raw_spin_lock_irqsave(&cfs_b->lock, flags);
5439     for (;;) {
5440         overrun = hrtimer_forward_now(timer, cfs_b->period);
5441         if (!overrun)
5442             break;
5443 
5444         idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
5445 
5446         if (++count > 3) {
5447             u64 new, old = ktime_to_ns(cfs_b->period);
5448 
5449             /*
5450              * Grow period by a factor of 2 to avoid losing precision.
5451              * Precision loss in the quota/period ratio can cause __cfs_schedulable
5452              * to fail.
5453              */
5454             new = old * 2;
5455             if (new < max_cfs_quota_period) {
5456                 cfs_b->period = ns_to_ktime(new);
5457                 cfs_b->quota *= 2;
5458                 cfs_b->burst *= 2;
5459 
5460                 pr_warn_ratelimited(
5461     "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5462                     smp_processor_id(),
5463                     div_u64(new, NSEC_PER_USEC),
5464                     div_u64(cfs_b->quota, NSEC_PER_USEC));
5465             } else {
5466                 pr_warn_ratelimited(
5467     "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5468                     smp_processor_id(),
5469                     div_u64(old, NSEC_PER_USEC),
5470                     div_u64(cfs_b->quota, NSEC_PER_USEC));
5471             }
5472 
5473             /* reset count so we don't come right back in here */
5474             count = 0;
5475         }
5476     }
5477     if (idle)
5478         cfs_b->period_active = 0;
5479     raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5480 
5481     return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
5482 }
5483 
5484 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5485 {
5486     raw_spin_lock_init(&cfs_b->lock);
5487     cfs_b->runtime = 0;
5488     cfs_b->quota = RUNTIME_INF;
5489     cfs_b->period = ns_to_ktime(default_cfs_period());
5490     cfs_b->burst = 0;
5491 
5492     INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
5493     hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
5494     cfs_b->period_timer.function = sched_cfs_period_timer;
5495     hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5496     cfs_b->slack_timer.function = sched_cfs_slack_timer;
5497     cfs_b->slack_started = false;
5498 }
5499 
5500 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5501 {
5502     cfs_rq->runtime_enabled = 0;
5503     INIT_LIST_HEAD(&cfs_rq->throttled_list);
5504 }
5505 
5506 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5507 {
5508     lockdep_assert_held(&cfs_b->lock);
5509 
5510     if (cfs_b->period_active)
5511         return;
5512 
5513     cfs_b->period_active = 1;
5514     hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
5515     hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
5516 }
5517 
5518 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5519 {
5520     /* init_cfs_bandwidth() was not called */
5521     if (!cfs_b->throttled_cfs_rq.next)
5522         return;
5523 
5524     hrtimer_cancel(&cfs_b->period_timer);
5525     hrtimer_cancel(&cfs_b->slack_timer);
5526 }
5527 
5528 /*
5529  * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
5530  *
5531  * The race is harmless, since modifying bandwidth settings of unhooked group
5532  * bits doesn't do much.
5533  */
5534 
5535 /* cpu online callback */
5536 static void __maybe_unused update_runtime_enabled(struct rq *rq)
5537 {
5538     struct task_group *tg;
5539 
5540     lockdep_assert_rq_held(rq);
5541 
5542     rcu_read_lock();
5543     list_for_each_entry_rcu(tg, &task_groups, list) {
5544         struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
5545         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5546 
5547         raw_spin_lock(&cfs_b->lock);
5548         cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
5549         raw_spin_unlock(&cfs_b->lock);
5550     }
5551     rcu_read_unlock();
5552 }
5553 
5554 /* cpu offline callback */
5555 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
5556 {
5557     struct task_group *tg;
5558 
5559     lockdep_assert_rq_held(rq);
5560 
5561     rcu_read_lock();
5562     list_for_each_entry_rcu(tg, &task_groups, list) {
5563         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5564 
5565         if (!cfs_rq->runtime_enabled)
5566             continue;
5567 
5568         /*
5569          * clock_task is not advancing so we just need to make sure
5570          * there's some valid quota amount
5571          */
5572         cfs_rq->runtime_remaining = 1;
5573         /*
5574          * Offline rq is schedulable till CPU is completely disabled
5575          * in take_cpu_down(), so we prevent new cfs throttling here.
5576          */
5577         cfs_rq->runtime_enabled = 0;
5578 
5579         if (cfs_rq_throttled(cfs_rq))
5580             unthrottle_cfs_rq(cfs_rq);
5581     }
5582     rcu_read_unlock();
5583 }
5584 
5585 #else /* CONFIG_CFS_BANDWIDTH */
5586 
5587 static inline bool cfs_bandwidth_used(void)
5588 {
5589     return false;
5590 }
5591 
5592 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
5593 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
5594 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
5595 static inline void sync_throttle(struct task_group *tg, int cpu) {}
5596 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
5597 
5598 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
5599 {
5600     return 0;
5601 }
5602 
5603 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
5604 {
5605     return 0;
5606 }
5607 
5608 static inline int throttled_lb_pair(struct task_group *tg,
5609                     int src_cpu, int dest_cpu)
5610 {
5611     return 0;
5612 }
5613 
5614 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
5615 
5616 #ifdef CONFIG_FAIR_GROUP_SCHED
5617 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
5618 #endif
5619 
5620 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
5621 {
5622     return NULL;
5623 }
5624 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
5625 static inline void update_runtime_enabled(struct rq *rq) {}
5626 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
5627 
5628 #endif /* CONFIG_CFS_BANDWIDTH */
5629 
5630 /**************************************************
5631  * CFS operations on tasks:
5632  */
5633 
5634 #ifdef CONFIG_SCHED_HRTICK
5635 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
5636 {
5637     struct sched_entity *se = &p->se;
5638     struct cfs_rq *cfs_rq = cfs_rq_of(se);
5639 
5640     SCHED_WARN_ON(task_rq(p) != rq);
5641 
5642     if (rq->cfs.h_nr_running > 1) {
5643         u64 slice = sched_slice(cfs_rq, se);
5644         u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
5645         s64 delta = slice - ran;
5646 
5647         if (delta < 0) {
5648             if (task_current(rq, p))
5649                 resched_curr(rq);
5650             return;
5651         }
5652         hrtick_start(rq, delta);
5653     }
5654 }
5655 
5656 /*
5657  * called from enqueue/dequeue and updates the hrtick when the
5658  * current task is from our class and nr_running is low enough
5659  * to matter.
5660  */
5661 static void hrtick_update(struct rq *rq)
5662 {
5663     struct task_struct *curr = rq->curr;
5664 
5665     if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
5666         return;
5667 
5668     if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
5669         hrtick_start_fair(rq, curr);
5670 }
5671 #else /* !CONFIG_SCHED_HRTICK */
5672 static inline void
5673 hrtick_start_fair(struct rq *rq, struct task_struct *p)
5674 {
5675 }
5676 
5677 static inline void hrtick_update(struct rq *rq)
5678 {
5679 }
5680 #endif
5681 
5682 #ifdef CONFIG_SMP
5683 static inline bool cpu_overutilized(int cpu)
5684 {
5685     return !fits_capacity(cpu_util_cfs(cpu), capacity_of(cpu));
5686 }
5687 
5688 static inline void update_overutilized_status(struct rq *rq)
5689 {
5690     if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
5691         WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
5692         trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
5693     }
5694 }
5695 #else
5696 static inline void update_overutilized_status(struct rq *rq) { }
5697 #endif
5698 
5699 /* Runqueue only has SCHED_IDLE tasks enqueued */
5700 static int sched_idle_rq(struct rq *rq)
5701 {
5702     return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
5703             rq->nr_running);
5704 }
5705 
5706 /*
5707  * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use
5708  * of idle_nr_running, which does not consider idle descendants of normal
5709  * entities.
5710  */
5711 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq)
5712 {
5713     return cfs_rq->nr_running &&
5714         cfs_rq->nr_running == cfs_rq->idle_nr_running;
5715 }
5716 
5717 #ifdef CONFIG_SMP
5718 static int sched_idle_cpu(int cpu)
5719 {
5720     return sched_idle_rq(cpu_rq(cpu));
5721 }
5722 #endif
5723 
5724 /*
5725  * The enqueue_task method is called before nr_running is
5726  * increased. Here we update the fair scheduling stats and
5727  * then put the task into the rbtree:
5728  */
5729 static void
5730 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5731 {
5732     struct cfs_rq *cfs_rq;
5733     struct sched_entity *se = &p->se;
5734     int idle_h_nr_running = task_has_idle_policy(p);
5735     int task_new = !(flags & ENQUEUE_WAKEUP);
5736 
5737     /*
5738      * The code below (indirectly) updates schedutil which looks at
5739      * the cfs_rq utilization to select a frequency.
5740      * Let's add the task's estimated utilization to the cfs_rq's
5741      * estimated utilization, before we update schedutil.
5742      */
5743     util_est_enqueue(&rq->cfs, p);
5744 
5745     /*
5746      * If in_iowait is set, the code below may not trigger any cpufreq
5747      * utilization updates, so do it here explicitly with the IOWAIT flag
5748      * passed.
5749      */
5750     if (p->in_iowait)
5751         cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
5752 
5753     for_each_sched_entity(se) {
5754         if (se->on_rq)
5755             break;
5756         cfs_rq = cfs_rq_of(se);
5757         enqueue_entity(cfs_rq, se, flags);
5758 
5759         cfs_rq->h_nr_running++;
5760         cfs_rq->idle_h_nr_running += idle_h_nr_running;
5761 
5762         if (cfs_rq_is_idle(cfs_rq))
5763             idle_h_nr_running = 1;
5764 
5765         /* end evaluation on encountering a throttled cfs_rq */
5766         if (cfs_rq_throttled(cfs_rq))
5767             goto enqueue_throttle;
5768 
5769         flags = ENQUEUE_WAKEUP;
5770     }
5771 
5772     for_each_sched_entity(se) {
5773         cfs_rq = cfs_rq_of(se);
5774 
5775         update_load_avg(cfs_rq, se, UPDATE_TG);
5776         se_update_runnable(se);
5777         update_cfs_group(se);
5778 
5779         cfs_rq->h_nr_running++;
5780         cfs_rq->idle_h_nr_running += idle_h_nr_running;
5781 
5782         if (cfs_rq_is_idle(cfs_rq))
5783             idle_h_nr_running = 1;
5784 
5785         /* end evaluation on encountering a throttled cfs_rq */
5786         if (cfs_rq_throttled(cfs_rq))
5787             goto enqueue_throttle;
5788     }
5789 
5790     /* At this point se is NULL and we are at root level*/
5791     add_nr_running(rq, 1);
5792 
5793     /*
5794      * Since new tasks are assigned an initial util_avg equal to
5795      * half of the spare capacity of their CPU, tiny tasks have the
5796      * ability to cross the overutilized threshold, which will
5797      * result in the load balancer ruining all the task placement
5798      * done by EAS. As a way to mitigate that effect, do not account
5799      * for the first enqueue operation of new tasks during the
5800      * overutilized flag detection.
5801      *
5802      * A better way of solving this problem would be to wait for
5803      * the PELT signals of tasks to converge before taking them
5804      * into account, but that is not straightforward to implement,
5805      * and the following generally works well enough in practice.
5806      */
5807     if (!task_new)
5808         update_overutilized_status(rq);
5809 
5810 enqueue_throttle:
5811     assert_list_leaf_cfs_rq(rq);
5812 
5813     hrtick_update(rq);
5814 }
5815 
5816 static void set_next_buddy(struct sched_entity *se);
5817 
5818 /*
5819  * The dequeue_task method is called before nr_running is
5820  * decreased. We remove the task from the rbtree and
5821  * update the fair scheduling stats:
5822  */
5823 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5824 {
5825     struct cfs_rq *cfs_rq;
5826     struct sched_entity *se = &p->se;
5827     int task_sleep = flags & DEQUEUE_SLEEP;
5828     int idle_h_nr_running = task_has_idle_policy(p);
5829     bool was_sched_idle = sched_idle_rq(rq);
5830 
5831     util_est_dequeue(&rq->cfs, p);
5832 
5833     for_each_sched_entity(se) {
5834         cfs_rq = cfs_rq_of(se);
5835         dequeue_entity(cfs_rq, se, flags);
5836 
5837         cfs_rq->h_nr_running--;
5838         cfs_rq->idle_h_nr_running -= idle_h_nr_running;
5839 
5840         if (cfs_rq_is_idle(cfs_rq))
5841             idle_h_nr_running = 1;
5842 
5843         /* end evaluation on encountering a throttled cfs_rq */
5844         if (cfs_rq_throttled(cfs_rq))
5845             goto dequeue_throttle;
5846 
5847         /* Don't dequeue parent if it has other entities besides us */
5848         if (cfs_rq->load.weight) {
5849             /* Avoid re-evaluating load for this entity: */
5850             se = parent_entity(se);
5851             /*
5852              * Bias pick_next to pick a task from this cfs_rq, as
5853              * p is sleeping when it is within its sched_slice.
5854              */
5855             if (task_sleep && se && !throttled_hierarchy(cfs_rq))
5856                 set_next_buddy(se);
5857             break;
5858         }
5859         flags |= DEQUEUE_SLEEP;
5860     }
5861 
5862     for_each_sched_entity(se) {
5863         cfs_rq = cfs_rq_of(se);
5864 
5865         update_load_avg(cfs_rq, se, UPDATE_TG);
5866         se_update_runnable(se);
5867         update_cfs_group(se);
5868 
5869         cfs_rq->h_nr_running--;
5870         cfs_rq->idle_h_nr_running -= idle_h_nr_running;
5871 
5872         if (cfs_rq_is_idle(cfs_rq))
5873             idle_h_nr_running = 1;
5874 
5875         /* end evaluation on encountering a throttled cfs_rq */
5876         if (cfs_rq_throttled(cfs_rq))
5877             goto dequeue_throttle;
5878 
5879     }
5880 
5881     /* At this point se is NULL and we are at root level*/
5882     sub_nr_running(rq, 1);
5883 
5884     /* balance early to pull high priority tasks */
5885     if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
5886         rq->next_balance = jiffies;
5887 
5888 dequeue_throttle:
5889     util_est_update(&rq->cfs, p, task_sleep);
5890     hrtick_update(rq);
5891 }
5892 
5893 #ifdef CONFIG_SMP
5894 
5895 /* Working cpumask for: load_balance, load_balance_newidle. */
5896 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
5897 DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
5898 
5899 #ifdef CONFIG_NO_HZ_COMMON
5900 
5901 static struct {
5902     cpumask_var_t idle_cpus_mask;
5903     atomic_t nr_cpus;
5904     int has_blocked;        /* Idle CPUS has blocked load */
5905     int needs_update;       /* Newly idle CPUs need their next_balance collated */
5906     unsigned long next_balance;     /* in jiffy units */
5907     unsigned long next_blocked; /* Next update of blocked load in jiffies */
5908 } nohz ____cacheline_aligned;
5909 
5910 #endif /* CONFIG_NO_HZ_COMMON */
5911 
5912 static unsigned long cpu_load(struct rq *rq)
5913 {
5914     return cfs_rq_load_avg(&rq->cfs);
5915 }
5916 
5917 /*
5918  * cpu_load_without - compute CPU load without any contributions from *p
5919  * @cpu: the CPU which load is requested
5920  * @p: the task which load should be discounted
5921  *
5922  * The load of a CPU is defined by the load of tasks currently enqueued on that
5923  * CPU as well as tasks which are currently sleeping after an execution on that
5924  * CPU.
5925  *
5926  * This method returns the load of the specified CPU by discounting the load of
5927  * the specified task, whenever the task is currently contributing to the CPU
5928  * load.
5929  */
5930 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
5931 {
5932     struct cfs_rq *cfs_rq;
5933     unsigned int load;
5934 
5935     /* Task has no contribution or is new */
5936     if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
5937         return cpu_load(rq);
5938 
5939     cfs_rq = &rq->cfs;
5940     load = READ_ONCE(cfs_rq->avg.load_avg);
5941 
5942     /* Discount task's util from CPU's util */
5943     lsub_positive(&load, task_h_load(p));
5944 
5945     return load;
5946 }
5947 
5948 static unsigned long cpu_runnable(struct rq *rq)
5949 {
5950     return cfs_rq_runnable_avg(&rq->cfs);
5951 }
5952 
5953 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
5954 {
5955     struct cfs_rq *cfs_rq;
5956     unsigned int runnable;
5957 
5958     /* Task has no contribution or is new */
5959     if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
5960         return cpu_runnable(rq);
5961 
5962     cfs_rq = &rq->cfs;
5963     runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
5964 
5965     /* Discount task's runnable from CPU's runnable */
5966     lsub_positive(&runnable, p->se.avg.runnable_avg);
5967 
5968     return runnable;
5969 }
5970 
5971 static unsigned long capacity_of(int cpu)
5972 {
5973     return cpu_rq(cpu)->cpu_capacity;
5974 }
5975 
5976 static void record_wakee(struct task_struct *p)
5977 {
5978     /*
5979      * Only decay a single time; tasks that have less then 1 wakeup per
5980      * jiffy will not have built up many flips.
5981      */
5982     if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5983         current->wakee_flips >>= 1;
5984         current->wakee_flip_decay_ts = jiffies;
5985     }
5986 
5987     if (current->last_wakee != p) {
5988         current->last_wakee = p;
5989         current->wakee_flips++;
5990     }
5991 }
5992 
5993 /*
5994  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
5995  *
5996  * A waker of many should wake a different task than the one last awakened
5997  * at a frequency roughly N times higher than one of its wakees.
5998  *
5999  * In order to determine whether we should let the load spread vs consolidating
6000  * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
6001  * partner, and a factor of lls_size higher frequency in the other.
6002  *
6003  * With both conditions met, we can be relatively sure that the relationship is
6004  * non-monogamous, with partner count exceeding socket size.
6005  *
6006  * Waker/wakee being client/server, worker/dispatcher, interrupt source or
6007  * whatever is irrelevant, spread criteria is apparent partner count exceeds
6008  * socket size.
6009  */
6010 static int wake_wide(struct task_struct *p)
6011 {
6012     unsigned int master = current->wakee_flips;
6013     unsigned int slave = p->wakee_flips;
6014     int factor = __this_cpu_read(sd_llc_size);
6015 
6016     if (master < slave)
6017         swap(master, slave);
6018     if (slave < factor || master < slave * factor)
6019         return 0;
6020     return 1;
6021 }
6022 
6023 /*
6024  * The purpose of wake_affine() is to quickly determine on which CPU we can run
6025  * soonest. For the purpose of speed we only consider the waking and previous
6026  * CPU.
6027  *
6028  * wake_affine_idle() - only considers 'now', it check if the waking CPU is
6029  *          cache-affine and is (or will be) idle.
6030  *
6031  * wake_affine_weight() - considers the weight to reflect the average
6032  *            scheduling latency of the CPUs. This seems to work
6033  *            for the overloaded case.
6034  */
6035 static int
6036 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
6037 {
6038     /*
6039      * If this_cpu is idle, it implies the wakeup is from interrupt
6040      * context. Only allow the move if cache is shared. Otherwise an
6041      * interrupt intensive workload could force all tasks onto one
6042      * node depending on the IO topology or IRQ affinity settings.
6043      *
6044      * If the prev_cpu is idle and cache affine then avoid a migration.
6045      * There is no guarantee that the cache hot data from an interrupt
6046      * is more important than cache hot data on the prev_cpu and from
6047      * a cpufreq perspective, it's better to have higher utilisation
6048      * on one CPU.
6049      */
6050     if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
6051         return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
6052 
6053     if (sync && cpu_rq(this_cpu)->nr_running == 1)
6054         return this_cpu;
6055 
6056     if (available_idle_cpu(prev_cpu))
6057         return prev_cpu;
6058 
6059     return nr_cpumask_bits;
6060 }
6061 
6062 static int
6063 wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
6064            int this_cpu, int prev_cpu, int sync)
6065 {
6066     s64 this_eff_load, prev_eff_load;
6067     unsigned long task_load;
6068 
6069     this_eff_load = cpu_load(cpu_rq(this_cpu));
6070 
6071     if (sync) {
6072         unsigned long current_load = task_h_load(current);
6073 
6074         if (current_load > this_eff_load)
6075             return this_cpu;
6076 
6077         this_eff_load -= current_load;
6078     }
6079 
6080     task_load = task_h_load(p);
6081 
6082     this_eff_load += task_load;
6083     if (sched_feat(WA_BIAS))
6084         this_eff_load *= 100;
6085     this_eff_load *= capacity_of(prev_cpu);
6086 
6087     prev_eff_load = cpu_load(cpu_rq(prev_cpu));
6088     prev_eff_load -= task_load;
6089     if (sched_feat(WA_BIAS))
6090         prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
6091     prev_eff_load *= capacity_of(this_cpu);
6092 
6093     /*
6094      * If sync, adjust the weight of prev_eff_load such that if
6095      * prev_eff == this_eff that select_idle_sibling() will consider
6096      * stacking the wakee on top of the waker if no other CPU is
6097      * idle.
6098      */
6099     if (sync)
6100         prev_eff_load += 1;
6101 
6102     return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
6103 }
6104 
6105 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
6106                int this_cpu, int prev_cpu, int sync)
6107 {
6108     int target = nr_cpumask_bits;
6109 
6110     if (sched_feat(WA_IDLE))
6111         target = wake_affine_idle(this_cpu, prev_cpu, sync);
6112 
6113     if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
6114         target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
6115 
6116     schedstat_inc(p->stats.nr_wakeups_affine_attempts);
6117     if (target == nr_cpumask_bits)
6118         return prev_cpu;
6119 
6120     schedstat_inc(sd->ttwu_move_affine);
6121     schedstat_inc(p->stats.nr_wakeups_affine);
6122     return target;
6123 }
6124 
6125 static struct sched_group *
6126 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
6127 
6128 /*
6129  * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
6130  */
6131 static int
6132 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
6133 {
6134     unsigned long load, min_load = ULONG_MAX;
6135     unsigned int min_exit_latency = UINT_MAX;
6136     u64 latest_idle_timestamp = 0;
6137     int least_loaded_cpu = this_cpu;
6138     int shallowest_idle_cpu = -1;
6139     int i;
6140 
6141     /* Check if we have any choice: */
6142     if (group->group_weight == 1)
6143         return cpumask_first(sched_group_span(group));
6144 
6145     /* Traverse only the allowed CPUs */
6146     for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
6147         struct rq *rq = cpu_rq(i);
6148 
6149         if (!sched_core_cookie_match(rq, p))
6150             continue;
6151 
6152         if (sched_idle_cpu(i))
6153             return i;
6154 
6155         if (available_idle_cpu(i)) {
6156             struct cpuidle_state *idle = idle_get_state(rq);
6157             if (idle && idle->exit_latency < min_exit_latency) {
6158                 /*
6159                  * We give priority to a CPU whose idle state
6160                  * has the smallest exit latency irrespective
6161                  * of any idle timestamp.
6162                  */
6163                 min_exit_latency = idle->exit_latency;
6164                 latest_idle_timestamp = rq->idle_stamp;
6165                 shallowest_idle_cpu = i;
6166             } else if ((!idle || idle->exit_latency == min_exit_latency) &&
6167                    rq->idle_stamp > latest_idle_timestamp) {
6168                 /*
6169                  * If equal or no active idle state, then
6170                  * the most recently idled CPU might have
6171                  * a warmer cache.
6172                  */
6173                 latest_idle_timestamp = rq->idle_stamp;
6174                 shallowest_idle_cpu = i;
6175             }
6176         } else if (shallowest_idle_cpu == -1) {
6177             load = cpu_load(cpu_rq(i));
6178             if (load < min_load) {
6179                 min_load = load;
6180                 least_loaded_cpu = i;
6181             }
6182         }
6183     }
6184 
6185     return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
6186 }
6187 
6188 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
6189                   int cpu, int prev_cpu, int sd_flag)
6190 {
6191     int new_cpu = cpu;
6192 
6193     if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
6194         return prev_cpu;
6195 
6196     /*
6197      * We need task's util for cpu_util_without, sync it up to
6198      * prev_cpu's last_update_time.
6199      */
6200     if (!(sd_flag & SD_BALANCE_FORK))
6201         sync_entity_load_avg(&p->se);
6202 
6203     while (sd) {
6204         struct sched_group *group;
6205         struct sched_domain *tmp;
6206         int weight;
6207 
6208         if (!(sd->flags & sd_flag)) {
6209             sd = sd->child;
6210             continue;
6211         }
6212 
6213         group = find_idlest_group(sd, p, cpu);
6214         if (!group) {
6215             sd = sd->child;
6216             continue;
6217         }
6218 
6219         new_cpu = find_idlest_group_cpu(group, p, cpu);
6220         if (new_cpu == cpu) {
6221             /* Now try balancing at a lower domain level of 'cpu': */
6222             sd = sd->child;
6223             continue;
6224         }
6225 
6226         /* Now try balancing at a lower domain level of 'new_cpu': */
6227         cpu = new_cpu;
6228         weight = sd->span_weight;
6229         sd = NULL;
6230         for_each_domain(cpu, tmp) {
6231             if (weight <= tmp->span_weight)
6232                 break;
6233             if (tmp->flags & sd_flag)
6234                 sd = tmp;
6235         }
6236     }
6237 
6238     return new_cpu;
6239 }
6240 
6241 static inline int __select_idle_cpu(int cpu, struct task_struct *p)
6242 {
6243     if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
6244         sched_cpu_cookie_match(cpu_rq(cpu), p))
6245         return cpu;
6246 
6247     return -1;
6248 }
6249 
6250 #ifdef CONFIG_SCHED_SMT
6251 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
6252 EXPORT_SYMBOL_GPL(sched_smt_present);
6253 
6254 static inline void set_idle_cores(int cpu, int val)
6255 {
6256     struct sched_domain_shared *sds;
6257 
6258     sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6259     if (sds)
6260         WRITE_ONCE(sds->has_idle_cores, val);
6261 }
6262 
6263 static inline bool test_idle_cores(int cpu, bool def)
6264 {
6265     struct sched_domain_shared *sds;
6266 
6267     sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6268     if (sds)
6269         return READ_ONCE(sds->has_idle_cores);
6270 
6271     return def;
6272 }
6273 
6274 /*
6275  * Scans the local SMT mask to see if the entire core is idle, and records this
6276  * information in sd_llc_shared->has_idle_cores.
6277  *
6278  * Since SMT siblings share all cache levels, inspecting this limited remote
6279  * state should be fairly cheap.
6280  */
6281 void __update_idle_core(struct rq *rq)
6282 {
6283     int core = cpu_of(rq);
6284     int cpu;
6285 
6286     rcu_read_lock();
6287     if (test_idle_cores(core, true))
6288         goto unlock;
6289 
6290     for_each_cpu(cpu, cpu_smt_mask(core)) {
6291         if (cpu == core)
6292             continue;
6293 
6294         if (!available_idle_cpu(cpu))
6295             goto unlock;
6296     }
6297 
6298     set_idle_cores(core, 1);
6299 unlock:
6300     rcu_read_unlock();
6301 }
6302 
6303 /*
6304  * Scan the entire LLC domain for idle cores; this dynamically switches off if
6305  * there are no idle cores left in the system; tracked through
6306  * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
6307  */
6308 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
6309 {
6310     bool idle = true;
6311     int cpu;
6312 
6313     if (!static_branch_likely(&sched_smt_present))
6314         return __select_idle_cpu(core, p);
6315 
6316     for_each_cpu(cpu, cpu_smt_mask(core)) {
6317         if (!available_idle_cpu(cpu)) {
6318             idle = false;
6319             if (*idle_cpu == -1) {
6320                 if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
6321                     *idle_cpu = cpu;
6322                     break;
6323                 }
6324                 continue;
6325             }
6326             break;
6327         }
6328         if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
6329             *idle_cpu = cpu;
6330     }
6331 
6332     if (idle)
6333         return core;
6334 
6335     cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
6336     return -1;
6337 }
6338 
6339 /*
6340  * Scan the local SMT mask for idle CPUs.
6341  */
6342 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
6343 {
6344     int cpu;
6345 
6346     for_each_cpu(cpu, cpu_smt_mask(target)) {
6347         if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
6348             !cpumask_test_cpu(cpu, sched_domain_span(sd)))
6349             continue;
6350         if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
6351             return cpu;
6352     }
6353 
6354     return -1;
6355 }
6356 
6357 #else /* CONFIG_SCHED_SMT */
6358 
6359 static inline void set_idle_cores(int cpu, int val)
6360 {
6361 }
6362 
6363 static inline bool test_idle_cores(int cpu, bool def)
6364 {
6365     return def;
6366 }
6367 
6368 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
6369 {
6370     return __select_idle_cpu(core, p);
6371 }
6372 
6373 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
6374 {
6375     return -1;
6376 }
6377 
6378 #endif /* CONFIG_SCHED_SMT */
6379 
6380 /*
6381  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6382  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
6383  * average idle time for this rq (as found in rq->avg_idle).
6384  */
6385 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
6386 {
6387     struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
6388     int i, cpu, idle_cpu = -1, nr = INT_MAX;
6389     struct sched_domain_shared *sd_share;
6390     struct rq *this_rq = this_rq();
6391     int this = smp_processor_id();
6392     struct sched_domain *this_sd;
6393     u64 time = 0;
6394 
6395     this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
6396     if (!this_sd)
6397         return -1;
6398 
6399     cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6400 
6401     if (sched_feat(SIS_PROP) && !has_idle_core) {
6402         u64 avg_cost, avg_idle, span_avg;
6403         unsigned long now = jiffies;
6404 
6405         /*
6406          * If we're busy, the assumption that the last idle period
6407          * predicts the future is flawed; age away the remaining
6408          * predicted idle time.
6409          */
6410         if (unlikely(this_rq->wake_stamp < now)) {
6411             while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
6412                 this_rq->wake_stamp++;
6413                 this_rq->wake_avg_idle >>= 1;
6414             }
6415         }
6416 
6417         avg_idle = this_rq->wake_avg_idle;
6418         avg_cost = this_sd->avg_scan_cost + 1;
6419 
6420         span_avg = sd->span_weight * avg_idle;
6421         if (span_avg > 4*avg_cost)
6422             nr = div_u64(span_avg, avg_cost);
6423         else
6424             nr = 4;
6425 
6426         time = cpu_clock(this);
6427     }
6428 
6429     if (sched_feat(SIS_UTIL)) {
6430         sd_share = rcu_dereference(per_cpu(sd_llc_shared, target));
6431         if (sd_share) {
6432             /* because !--nr is the condition to stop scan */
6433             nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
6434             /* overloaded LLC is unlikely to have idle cpu/core */
6435             if (nr == 1)
6436                 return -1;
6437         }
6438     }
6439 
6440     for_each_cpu_wrap(cpu, cpus, target + 1) {
6441         if (has_idle_core) {
6442             i = select_idle_core(p, cpu, cpus, &idle_cpu);
6443             if ((unsigned int)i < nr_cpumask_bits)
6444                 return i;
6445 
6446         } else {
6447             if (!--nr)
6448                 return -1;
6449             idle_cpu = __select_idle_cpu(cpu, p);
6450             if ((unsigned int)idle_cpu < nr_cpumask_bits)
6451                 break;
6452         }
6453     }
6454 
6455     if (has_idle_core)
6456         set_idle_cores(target, false);
6457 
6458     if (sched_feat(SIS_PROP) && !has_idle_core) {
6459         time = cpu_clock(this) - time;
6460 
6461         /*
6462          * Account for the scan cost of wakeups against the average
6463          * idle time.
6464          */
6465         this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
6466 
6467         update_avg(&this_sd->avg_scan_cost, time);
6468     }
6469 
6470     return idle_cpu;
6471 }
6472 
6473 /*
6474  * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
6475  * the task fits. If no CPU is big enough, but there are idle ones, try to
6476  * maximize capacity.
6477  */
6478 static int
6479 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
6480 {
6481     unsigned long task_util, best_cap = 0;
6482     int cpu, best_cpu = -1;
6483     struct cpumask *cpus;
6484 
6485     cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
6486     cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6487 
6488     task_util = uclamp_task_util(p);
6489 
6490     for_each_cpu_wrap(cpu, cpus, target) {
6491         unsigned long cpu_cap = capacity_of(cpu);
6492 
6493         if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
6494             continue;
6495         if (fits_capacity(task_util, cpu_cap))
6496             return cpu;
6497 
6498         if (cpu_cap > best_cap) {
6499             best_cap = cpu_cap;
6500             best_cpu = cpu;
6501         }
6502     }
6503 
6504     return best_cpu;
6505 }
6506 
6507 static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
6508 {
6509     if (static_branch_unlikely(&sched_asym_cpucapacity))
6510         return fits_capacity(task_util, capacity_of(cpu));
6511 
6512     return true;
6513 }
6514 
6515 /*
6516  * Try and locate an idle core/thread in the LLC cache domain.
6517  */
6518 static int select_idle_sibling(struct task_struct *p, int prev, int target)
6519 {
6520     bool has_idle_core = false;
6521     struct sched_domain *sd;
6522     unsigned long task_util;
6523     int i, recent_used_cpu;
6524 
6525     /*
6526      * On asymmetric system, update task utilization because we will check
6527      * that the task fits with cpu's capacity.
6528      */
6529     if (static_branch_unlikely(&sched_asym_cpucapacity)) {
6530         sync_entity_load_avg(&p->se);
6531         task_util = uclamp_task_util(p);
6532     }
6533 
6534     /*
6535      * per-cpu select_rq_mask usage
6536      */
6537     lockdep_assert_irqs_disabled();
6538 
6539     if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
6540         asym_fits_capacity(task_util, target))
6541         return target;
6542 
6543     /*
6544      * If the previous CPU is cache affine and idle, don't be stupid:
6545      */
6546     if (prev != target && cpus_share_cache(prev, target) &&
6547         (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
6548         asym_fits_capacity(task_util, prev))
6549         return prev;
6550 
6551     /*
6552      * Allow a per-cpu kthread to stack with the wakee if the
6553      * kworker thread and the tasks previous CPUs are the same.
6554      * The assumption is that the wakee queued work for the
6555      * per-cpu kthread that is now complete and the wakeup is
6556      * essentially a sync wakeup. An obvious example of this
6557      * pattern is IO completions.
6558      */
6559     if (is_per_cpu_kthread(current) &&
6560         in_task() &&
6561         prev == smp_processor_id() &&
6562         this_rq()->nr_running <= 1 &&
6563         asym_fits_capacity(task_util, prev)) {
6564         return prev;
6565     }
6566 
6567     /* Check a recently used CPU as a potential idle candidate: */
6568     recent_used_cpu = p->recent_used_cpu;
6569     p->recent_used_cpu = prev;
6570     if (recent_used_cpu != prev &&
6571         recent_used_cpu != target &&
6572         cpus_share_cache(recent_used_cpu, target) &&
6573         (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
6574         cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
6575         asym_fits_capacity(task_util, recent_used_cpu)) {
6576         return recent_used_cpu;
6577     }
6578 
6579     /*
6580      * For asymmetric CPU capacity systems, our domain of interest is
6581      * sd_asym_cpucapacity rather than sd_llc.
6582      */
6583     if (static_branch_unlikely(&sched_asym_cpucapacity)) {
6584         sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
6585         /*
6586          * On an asymmetric CPU capacity system where an exclusive
6587          * cpuset defines a symmetric island (i.e. one unique
6588          * capacity_orig value through the cpuset), the key will be set
6589          * but the CPUs within that cpuset will not have a domain with
6590          * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
6591          * capacity path.
6592          */
6593         if (sd) {
6594             i = select_idle_capacity(p, sd, target);
6595             return ((unsigned)i < nr_cpumask_bits) ? i : target;
6596         }
6597     }
6598 
6599     sd = rcu_dereference(per_cpu(sd_llc, target));
6600     if (!sd)
6601         return target;
6602 
6603     if (sched_smt_active()) {
6604         has_idle_core = test_idle_cores(target, false);
6605 
6606         if (!has_idle_core && cpus_share_cache(prev, target)) {
6607             i = select_idle_smt(p, sd, prev);
6608             if ((unsigned int)i < nr_cpumask_bits)
6609                 return i;
6610         }
6611     }
6612 
6613     i = select_idle_cpu(p, sd, has_idle_core, target);
6614     if ((unsigned)i < nr_cpumask_bits)
6615         return i;
6616 
6617     return target;
6618 }
6619 
6620 /*
6621  * Predicts what cpu_util(@cpu) would return if @p was removed from @cpu
6622  * (@dst_cpu = -1) or migrated to @dst_cpu.
6623  */
6624 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
6625 {
6626     struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
6627     unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
6628 
6629     /*
6630      * If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its
6631      * contribution. If @p migrates from another CPU to @cpu add its
6632      * contribution. In all the other cases @cpu is not impacted by the
6633      * migration so its util_avg is already correct.
6634      */
6635     if (task_cpu(p) == cpu && dst_cpu != cpu)
6636         lsub_positive(&util, task_util(p));
6637     else if (task_cpu(p) != cpu && dst_cpu == cpu)
6638         util += task_util(p);
6639 
6640     if (sched_feat(UTIL_EST)) {
6641         unsigned long util_est;
6642 
6643         util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
6644 
6645         /*
6646          * During wake-up @p isn't enqueued yet and doesn't contribute
6647          * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
6648          * If @dst_cpu == @cpu add it to "simulate" cpu_util after @p
6649          * has been enqueued.
6650          *
6651          * During exec (@dst_cpu = -1) @p is enqueued and does
6652          * contribute to cpu_rq(cpu)->cfs.util_est.enqueued.
6653          * Remove it to "simulate" cpu_util without @p's contribution.
6654          *
6655          * Despite the task_on_rq_queued(@p) check there is still a
6656          * small window for a possible race when an exec
6657          * select_task_rq_fair() races with LB's detach_task().
6658          *
6659          *   detach_task()
6660          *     deactivate_task()
6661          *       p->on_rq = TASK_ON_RQ_MIGRATING;
6662          *       -------------------------------- A
6663          *       dequeue_task()                    \
6664          *         dequeue_task_fair()              + Race Time
6665          *           util_est_dequeue()            /
6666          *       -------------------------------- B
6667          *
6668          * The additional check "current == p" is required to further
6669          * reduce the race window.
6670          */
6671         if (dst_cpu == cpu)
6672             util_est += _task_util_est(p);
6673         else if (unlikely(task_on_rq_queued(p) || current == p))
6674             lsub_positive(&util_est, _task_util_est(p));
6675 
6676         util = max(util, util_est);
6677     }
6678 
6679     return min(util, capacity_orig_of(cpu));
6680 }
6681 
6682 /*
6683  * cpu_util_without: compute cpu utilization without any contributions from *p
6684  * @cpu: the CPU which utilization is requested
6685  * @p: the task which utilization should be discounted
6686  *
6687  * The utilization of a CPU is defined by the utilization of tasks currently
6688  * enqueued on that CPU as well as tasks which are currently sleeping after an
6689  * execution on that CPU.
6690  *
6691  * This method returns the utilization of the specified CPU by discounting the
6692  * utilization of the specified task, whenever the task is currently
6693  * contributing to the CPU utilization.
6694  */
6695 static unsigned long cpu_util_without(int cpu, struct task_struct *p)
6696 {
6697     /* Task has no contribution or is new */
6698     if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6699         return cpu_util_cfs(cpu);
6700 
6701     return cpu_util_next(cpu, p, -1);
6702 }
6703 
6704 /*
6705  * energy_env - Utilization landscape for energy estimation.
6706  * @task_busy_time: Utilization contribution by the task for which we test the
6707  *                  placement. Given by eenv_task_busy_time().
6708  * @pd_busy_time:   Utilization of the whole perf domain without the task
6709  *                  contribution. Given by eenv_pd_busy_time().
6710  * @cpu_cap:        Maximum CPU capacity for the perf domain.
6711  * @pd_cap:         Entire perf domain capacity. (pd->nr_cpus * cpu_cap).
6712  */
6713 struct energy_env {
6714     unsigned long task_busy_time;
6715     unsigned long pd_busy_time;
6716     unsigned long cpu_cap;
6717     unsigned long pd_cap;
6718 };
6719 
6720 /*
6721  * Compute the task busy time for compute_energy(). This time cannot be
6722  * injected directly into effective_cpu_util() because of the IRQ scaling.
6723  * The latter only makes sense with the most recent CPUs where the task has
6724  * run.
6725  */
6726 static inline void eenv_task_busy_time(struct energy_env *eenv,
6727                        struct task_struct *p, int prev_cpu)
6728 {
6729     unsigned long busy_time, max_cap = arch_scale_cpu_capacity(prev_cpu);
6730     unsigned long irq = cpu_util_irq(cpu_rq(prev_cpu));
6731 
6732     if (unlikely(irq >= max_cap))
6733         busy_time = max_cap;
6734     else
6735         busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap);
6736 
6737     eenv->task_busy_time = busy_time;
6738 }
6739 
6740 /*
6741  * Compute the perf_domain (PD) busy time for compute_energy(). Based on the
6742  * utilization for each @pd_cpus, it however doesn't take into account
6743  * clamping since the ratio (utilization / cpu_capacity) is already enough to
6744  * scale the EM reported power consumption at the (eventually clamped)
6745  * cpu_capacity.
6746  *
6747  * The contribution of the task @p for which we want to estimate the
6748  * energy cost is removed (by cpu_util_next()) and must be calculated
6749  * separately (see eenv_task_busy_time). This ensures:
6750  *
6751  *   - A stable PD utilization, no matter which CPU of that PD we want to place
6752  *     the task on.
6753  *
6754  *   - A fair comparison between CPUs as the task contribution (task_util())
6755  *     will always be the same no matter which CPU utilization we rely on
6756  *     (util_avg or util_est).
6757  *
6758  * Set @eenv busy time for the PD that spans @pd_cpus. This busy time can't
6759  * exceed @eenv->pd_cap.
6760  */
6761 static inline void eenv_pd_busy_time(struct energy_env *eenv,
6762                      struct cpumask *pd_cpus,
6763                      struct task_struct *p)
6764 {
6765     unsigned long busy_time = 0;
6766     int cpu;
6767 
6768     for_each_cpu(cpu, pd_cpus) {
6769         unsigned long util = cpu_util_next(cpu, p, -1);
6770 
6771         busy_time += effective_cpu_util(cpu, util, ENERGY_UTIL, NULL);
6772     }
6773 
6774     eenv->pd_busy_time = min(eenv->pd_cap, busy_time);
6775 }
6776 
6777 /*
6778  * Compute the maximum utilization for compute_energy() when the task @p
6779  * is placed on the cpu @dst_cpu.
6780  *
6781  * Returns the maximum utilization among @eenv->cpus. This utilization can't
6782  * exceed @eenv->cpu_cap.
6783  */
6784 static inline unsigned long
6785 eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus,
6786          struct task_struct *p, int dst_cpu)
6787 {
6788     unsigned long max_util = 0;
6789     int cpu;
6790 
6791     for_each_cpu(cpu, pd_cpus) {
6792         struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL;
6793         unsigned long util = cpu_util_next(cpu, p, dst_cpu);
6794         unsigned long cpu_util;
6795 
6796         /*
6797          * Performance domain frequency: utilization clamping
6798          * must be considered since it affects the selection
6799          * of the performance domain frequency.
6800          * NOTE: in case RT tasks are running, by default the
6801          * FREQUENCY_UTIL's utilization can be max OPP.
6802          */
6803         cpu_util = effective_cpu_util(cpu, util, FREQUENCY_UTIL, tsk);
6804         max_util = max(max_util, cpu_util);
6805     }
6806 
6807     return min(max_util, eenv->cpu_cap);
6808 }
6809 
6810 /*
6811  * compute_energy(): Use the Energy Model to estimate the energy that @pd would
6812  * consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task
6813  * contribution is ignored.
6814  */
6815 static inline unsigned long
6816 compute_energy(struct energy_env *eenv, struct perf_domain *pd,
6817            struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu)
6818 {
6819     unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu);
6820     unsigned long busy_time = eenv->pd_busy_time;
6821 
6822     if (dst_cpu >= 0)
6823         busy_time = min(eenv->pd_cap, busy_time + eenv->task_busy_time);
6824 
6825     return em_cpu_energy(pd->em_pd, max_util, busy_time, eenv->cpu_cap);
6826 }
6827 
6828 /*
6829  * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
6830  * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
6831  * spare capacity in each performance domain and uses it as a potential
6832  * candidate to execute the task. Then, it uses the Energy Model to figure
6833  * out which of the CPU candidates is the most energy-efficient.
6834  *
6835  * The rationale for this heuristic is as follows. In a performance domain,
6836  * all the most energy efficient CPU candidates (according to the Energy
6837  * Model) are those for which we'll request a low frequency. When there are
6838  * several CPUs for which the frequency request will be the same, we don't
6839  * have enough data to break the tie between them, because the Energy Model
6840  * only includes active power costs. With this model, if we assume that
6841  * frequency requests follow utilization (e.g. using schedutil), the CPU with
6842  * the maximum spare capacity in a performance domain is guaranteed to be among
6843  * the best candidates of the performance domain.
6844  *
6845  * In practice, it could be preferable from an energy standpoint to pack
6846  * small tasks on a CPU in order to let other CPUs go in deeper idle states,
6847  * but that could also hurt our chances to go cluster idle, and we have no
6848  * ways to tell with the current Energy Model if this is actually a good
6849  * idea or not. So, find_energy_efficient_cpu() basically favors
6850  * cluster-packing, and spreading inside a cluster. That should at least be
6851  * a good thing for latency, and this is consistent with the idea that most
6852  * of the energy savings of EAS come from the asymmetry of the system, and
6853  * not so much from breaking the tie between identical CPUs. That's also the
6854  * reason why EAS is enabled in the topology code only for systems where
6855  * SD_ASYM_CPUCAPACITY is set.
6856  *
6857  * NOTE: Forkees are not accepted in the energy-aware wake-up path because
6858  * they don't have any useful utilization data yet and it's not possible to
6859  * forecast their impact on energy consumption. Consequently, they will be
6860  * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
6861  * to be energy-inefficient in some use-cases. The alternative would be to
6862  * bias new tasks towards specific types of CPUs first, or to try to infer
6863  * their util_avg from the parent task, but those heuristics could hurt
6864  * other use-cases too. So, until someone finds a better way to solve this,
6865  * let's keep things simple by re-using the existing slow path.
6866  */
6867 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
6868 {
6869     struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
6870     unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
6871     struct root_domain *rd = this_rq()->rd;
6872     int cpu, best_energy_cpu, target = -1;
6873     struct sched_domain *sd;
6874     struct perf_domain *pd;
6875     struct energy_env eenv;
6876 
6877     rcu_read_lock();
6878     pd = rcu_dereference(rd->pd);
6879     if (!pd || READ_ONCE(rd->overutilized))
6880         goto unlock;
6881 
6882     /*
6883      * Energy-aware wake-up happens on the lowest sched_domain starting
6884      * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
6885      */
6886     sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
6887     while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
6888         sd = sd->parent;
6889     if (!sd)
6890         goto unlock;
6891 
6892     target = prev_cpu;
6893 
6894     sync_entity_load_avg(&p->se);
6895     if (!task_util_est(p))
6896         goto unlock;
6897 
6898     eenv_task_busy_time(&eenv, p, prev_cpu);
6899 
6900     for (; pd; pd = pd->next) {
6901         unsigned long cpu_cap, cpu_thermal_cap, util;
6902         unsigned long cur_delta, max_spare_cap = 0;
6903         bool compute_prev_delta = false;
6904         int max_spare_cap_cpu = -1;
6905         unsigned long base_energy;
6906 
6907         cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
6908 
6909         if (cpumask_empty(cpus))
6910             continue;
6911 
6912         /* Account thermal pressure for the energy estimation */
6913         cpu = cpumask_first(cpus);
6914         cpu_thermal_cap = arch_scale_cpu_capacity(cpu);
6915         cpu_thermal_cap -= arch_scale_thermal_pressure(cpu);
6916 
6917         eenv.cpu_cap = cpu_thermal_cap;
6918         eenv.pd_cap = 0;
6919 
6920         for_each_cpu(cpu, cpus) {
6921             eenv.pd_cap += cpu_thermal_cap;
6922 
6923             if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
6924                 continue;
6925 
6926             if (!cpumask_test_cpu(cpu, p->cpus_ptr))
6927                 continue;
6928 
6929             util = cpu_util_next(cpu, p, cpu);
6930             cpu_cap = capacity_of(cpu);
6931 
6932             /*
6933              * Skip CPUs that cannot satisfy the capacity request.
6934              * IOW, placing the task there would make the CPU
6935              * overutilized. Take uclamp into account to see how
6936              * much capacity we can get out of the CPU; this is
6937              * aligned with sched_cpu_util().
6938              */
6939             util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
6940             if (!fits_capacity(util, cpu_cap))
6941                 continue;
6942 
6943             lsub_positive(&cpu_cap, util);
6944 
6945             if (cpu == prev_cpu) {
6946                 /* Always use prev_cpu as a candidate. */
6947                 compute_prev_delta = true;
6948             } else if (cpu_cap > max_spare_cap) {
6949                 /*
6950                  * Find the CPU with the maximum spare capacity
6951                  * in the performance domain.
6952                  */
6953                 max_spare_cap = cpu_cap;
6954                 max_spare_cap_cpu = cpu;
6955             }
6956         }
6957 
6958         if (max_spare_cap_cpu < 0 && !compute_prev_delta)
6959             continue;
6960 
6961         eenv_pd_busy_time(&eenv, cpus, p);
6962         /* Compute the 'base' energy of the pd, without @p */
6963         base_energy = compute_energy(&eenv, pd, cpus, p, -1);
6964 
6965         /* Evaluate the energy impact of using prev_cpu. */
6966         if (compute_prev_delta) {
6967             prev_delta = compute_energy(&eenv, pd, cpus, p,
6968                             prev_cpu);
6969             /* CPU utilization has changed */
6970             if (prev_delta < base_energy)
6971                 goto unlock;
6972             prev_delta -= base_energy;
6973             best_delta = min(best_delta, prev_delta);
6974         }
6975 
6976         /* Evaluate the energy impact of using max_spare_cap_cpu. */
6977         if (max_spare_cap_cpu >= 0) {
6978             cur_delta = compute_energy(&eenv, pd, cpus, p,
6979                            max_spare_cap_cpu);
6980             /* CPU utilization has changed */
6981             if (cur_delta < base_energy)
6982                 goto unlock;
6983             cur_delta -= base_energy;
6984             if (cur_delta < best_delta) {
6985                 best_delta = cur_delta;
6986                 best_energy_cpu = max_spare_cap_cpu;
6987             }
6988         }
6989     }
6990     rcu_read_unlock();
6991 
6992     if (best_delta < prev_delta)
6993         target = best_energy_cpu;
6994 
6995     return target;
6996 
6997 unlock:
6998     rcu_read_unlock();
6999 
7000     return target;
7001 }
7002 
7003 /*
7004  * select_task_rq_fair: Select target runqueue for the waking task in domains
7005  * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
7006  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
7007  *
7008  * Balances load by selecting the idlest CPU in the idlest group, or under
7009  * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
7010  *
7011  * Returns the target CPU number.
7012  */
7013 static int
7014 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
7015 {
7016     int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
7017     struct sched_domain *tmp, *sd = NULL;
7018     int cpu = smp_processor_id();
7019     int new_cpu = prev_cpu;
7020     int want_affine = 0;
7021     /* SD_flags and WF_flags share the first nibble */
7022     int sd_flag = wake_flags & 0xF;
7023 
7024     /*
7025      * required for stable ->cpus_allowed
7026      */
7027     lockdep_assert_held(&p->pi_lock);
7028     if (wake_flags & WF_TTWU) {
7029         record_wakee(p);
7030 
7031         if (sched_energy_enabled()) {
7032             new_cpu = find_energy_efficient_cpu(p, prev_cpu);
7033             if (new_cpu >= 0)
7034                 return new_cpu;
7035             new_cpu = prev_cpu;
7036         }
7037 
7038         want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
7039     }
7040 
7041     rcu_read_lock();
7042     for_each_domain(cpu, tmp) {
7043         /*
7044          * If both 'cpu' and 'prev_cpu' are part of this domain,
7045          * cpu is a valid SD_WAKE_AFFINE target.
7046          */
7047         if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
7048             cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
7049             if (cpu != prev_cpu)
7050                 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
7051 
7052             sd = NULL; /* Prefer wake_affine over balance flags */
7053             break;
7054         }
7055 
7056         /*
7057          * Usually only true for WF_EXEC and WF_FORK, as sched_domains
7058          * usually do not have SD_BALANCE_WAKE set. That means wakeup
7059          * will usually go to the fast path.
7060          */
7061         if (tmp->flags & sd_flag)
7062             sd = tmp;
7063         else if (!want_affine)
7064             break;
7065     }
7066 
7067     if (unlikely(sd)) {
7068         /* Slow path */
7069         new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
7070     } else if (wake_flags & WF_TTWU) { /* XXX always ? */
7071         /* Fast path */
7072         new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
7073     }
7074     rcu_read_unlock();
7075 
7076     return new_cpu;
7077 }
7078 
7079 static void detach_entity_cfs_rq(struct sched_entity *se);
7080 
7081 /*
7082  * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
7083  * cfs_rq_of(p) references at time of call are still valid and identify the
7084  * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
7085  */
7086 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
7087 {
7088     struct sched_entity *se = &p->se;
7089 
7090     /*
7091      * As blocked tasks retain absolute vruntime the migration needs to
7092      * deal with this by subtracting the old and adding the new
7093      * min_vruntime -- the latter is done by enqueue_entity() when placing
7094      * the task on the new runqueue.
7095      */
7096     if (READ_ONCE(p->__state) == TASK_WAKING) {
7097         struct cfs_rq *cfs_rq = cfs_rq_of(se);
7098 
7099         se->vruntime -= u64_u32_load(cfs_rq->min_vruntime);
7100     }
7101 
7102     if (p->on_rq == TASK_ON_RQ_MIGRATING) {
7103         /*
7104          * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
7105          * rq->lock and can modify state directly.
7106          */
7107         lockdep_assert_rq_held(task_rq(p));
7108         detach_entity_cfs_rq(se);
7109 
7110     } else {
7111         remove_entity_load_avg(se);
7112 
7113         /*
7114          * Here, the task's PELT values have been updated according to
7115          * the current rq's clock. But if that clock hasn't been
7116          * updated in a while, a substantial idle time will be missed,
7117          * leading to an inflation after wake-up on the new rq.
7118          *
7119          * Estimate the missing time from the cfs_rq last_update_time
7120          * and update sched_avg to improve the PELT continuity after
7121          * migration.
7122          */
7123         migrate_se_pelt_lag(se);
7124     }
7125 
7126     /* Tell new CPU we are migrated */
7127     se->avg.last_update_time = 0;
7128 
7129     /* We have migrated, no longer consider this task hot */
7130     se->exec_start = 0;
7131 
7132     update_scan_period(p, new_cpu);
7133 }
7134 
7135 static void task_dead_fair(struct task_struct *p)
7136 {
7137     remove_entity_load_avg(&p->se);
7138 }
7139 
7140 static int
7141 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7142 {
7143     if (rq->nr_running)
7144         return 1;
7145 
7146     return newidle_balance(rq, rf) != 0;
7147 }
7148 #endif /* CONFIG_SMP */
7149 
7150 static unsigned long wakeup_gran(struct sched_entity *se)
7151 {
7152     unsigned long gran = sysctl_sched_wakeup_granularity;
7153 
7154     /*
7155      * Since its curr running now, convert the gran from real-time
7156      * to virtual-time in his units.
7157      *
7158      * By using 'se' instead of 'curr' we penalize light tasks, so
7159      * they get preempted easier. That is, if 'se' < 'curr' then
7160      * the resulting gran will be larger, therefore penalizing the
7161      * lighter, if otoh 'se' > 'curr' then the resulting gran will
7162      * be smaller, again penalizing the lighter task.
7163      *
7164      * This is especially important for buddies when the leftmost
7165      * task is higher priority than the buddy.
7166      */
7167     return calc_delta_fair(gran, se);
7168 }
7169 
7170 /*
7171  * Should 'se' preempt 'curr'.
7172  *
7173  *             |s1
7174  *        |s2
7175  *   |s3
7176  *         g
7177  *      |<--->|c
7178  *
7179  *  w(c, s1) = -1
7180  *  w(c, s2) =  0
7181  *  w(c, s3) =  1
7182  *
7183  */
7184 static int
7185 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
7186 {
7187     s64 gran, vdiff = curr->vruntime - se->vruntime;
7188 
7189     if (vdiff <= 0)
7190         return -1;
7191 
7192     gran = wakeup_gran(se);
7193     if (vdiff > gran)
7194         return 1;
7195 
7196     return 0;
7197 }
7198 
7199 static void set_last_buddy(struct sched_entity *se)
7200 {
7201     for_each_sched_entity(se) {
7202         if (SCHED_WARN_ON(!se->on_rq))
7203             return;
7204         if (se_is_idle(se))
7205             return;
7206         cfs_rq_of(se)->last = se;
7207     }
7208 }
7209 
7210 static void set_next_buddy(struct sched_entity *se)
7211 {
7212     for_each_sched_entity(se) {
7213         if (SCHED_WARN_ON(!se->on_rq))
7214             return;
7215         if (se_is_idle(se))
7216             return;
7217         cfs_rq_of(se)->next = se;
7218     }
7219 }
7220 
7221 static void set_skip_buddy(struct sched_entity *se)
7222 {
7223     for_each_sched_entity(se)
7224         cfs_rq_of(se)->skip = se;
7225 }
7226 
7227 /*
7228  * Preempt the current task with a newly woken task if needed:
7229  */
7230 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
7231 {
7232     struct task_struct *curr = rq->curr;
7233     struct sched_entity *se = &curr->se, *pse = &p->se;
7234     struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7235     int scale = cfs_rq->nr_running >= sched_nr_latency;
7236     int next_buddy_marked = 0;
7237     int cse_is_idle, pse_is_idle;
7238 
7239     if (unlikely(se == pse))
7240         return;
7241 
7242     /*
7243      * This is possible from callers such as attach_tasks(), in which we
7244      * unconditionally check_preempt_curr() after an enqueue (which may have
7245      * lead to a throttle).  This both saves work and prevents false
7246      * next-buddy nomination below.
7247      */
7248     if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
7249         return;
7250 
7251     if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
7252         set_next_buddy(pse);
7253         next_buddy_marked = 1;
7254     }
7255 
7256     /*
7257      * We can come here with TIF_NEED_RESCHED already set from new task
7258      * wake up path.
7259      *
7260      * Note: this also catches the edge-case of curr being in a throttled
7261      * group (e.g. via set_curr_task), since update_curr() (in the
7262      * enqueue of curr) will have resulted in resched being set.  This
7263      * prevents us from potentially nominating it as a false LAST_BUDDY
7264      * below.
7265      */
7266     if (test_tsk_need_resched(curr))
7267         return;
7268 
7269     /* Idle tasks are by definition preempted by non-idle tasks. */
7270     if (unlikely(task_has_idle_policy(curr)) &&
7271         likely(!task_has_idle_policy(p)))
7272         goto preempt;
7273 
7274     /*
7275      * Batch and idle tasks do not preempt non-idle tasks (their preemption
7276      * is driven by the tick):
7277      */
7278     if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
7279         return;
7280 
7281     find_matching_se(&se, &pse);
7282     BUG_ON(!pse);
7283 
7284     cse_is_idle = se_is_idle(se);
7285     pse_is_idle = se_is_idle(pse);
7286 
7287     /*
7288      * Preempt an idle group in favor of a non-idle group (and don't preempt
7289      * in the inverse case).
7290      */
7291     if (cse_is_idle && !pse_is_idle)
7292         goto preempt;
7293     if (cse_is_idle != pse_is_idle)
7294         return;
7295 
7296     update_curr(cfs_rq_of(se));
7297     if (wakeup_preempt_entity(se, pse) == 1) {
7298         /*
7299          * Bias pick_next to pick the sched entity that is
7300          * triggering this preemption.
7301          */
7302         if (!next_buddy_marked)
7303             set_next_buddy(pse);
7304         goto preempt;
7305     }
7306 
7307     return;
7308 
7309 preempt:
7310     resched_curr(rq);
7311     /*
7312      * Only set the backward buddy when the current task is still
7313      * on the rq. This can happen when a wakeup gets interleaved
7314      * with schedule on the ->pre_schedule() or idle_balance()
7315      * point, either of which can * drop the rq lock.
7316      *
7317      * Also, during early boot the idle thread is in the fair class,
7318      * for obvious reasons its a bad idea to schedule back to it.
7319      */
7320     if (unlikely(!se->on_rq || curr == rq->idle))
7321         return;
7322 
7323     if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
7324         set_last_buddy(se);
7325 }
7326 
7327 #ifdef CONFIG_SMP
7328 static struct task_struct *pick_task_fair(struct rq *rq)
7329 {
7330     struct sched_entity *se;
7331     struct cfs_rq *cfs_rq;
7332 
7333 again:
7334     cfs_rq = &rq->cfs;
7335     if (!cfs_rq->nr_running)
7336         return NULL;
7337 
7338     do {
7339         struct sched_entity *curr = cfs_rq->curr;
7340 
7341         /* When we pick for a remote RQ, we'll not have done put_prev_entity() */
7342         if (curr) {
7343             if (curr->on_rq)
7344                 update_curr(cfs_rq);
7345             else
7346                 curr = NULL;
7347 
7348             if (unlikely(check_cfs_rq_runtime(cfs_rq)))
7349                 goto again;
7350         }
7351 
7352         se = pick_next_entity(cfs_rq, curr);
7353         cfs_rq = group_cfs_rq(se);
7354     } while (cfs_rq);
7355 
7356     return task_of(se);
7357 }
7358 #endif
7359 
7360 struct task_struct *
7361 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7362 {
7363     struct cfs_rq *cfs_rq = &rq->cfs;
7364     struct sched_entity *se;
7365     struct task_struct *p;
7366     int new_tasks;
7367 
7368 again:
7369     if (!sched_fair_runnable(rq))
7370         goto idle;
7371 
7372 #ifdef CONFIG_FAIR_GROUP_SCHED
7373     if (!prev || prev->sched_class != &fair_sched_class)
7374         goto simple;
7375 
7376     /*
7377      * Because of the set_next_buddy() in dequeue_task_fair() it is rather
7378      * likely that a next task is from the same cgroup as the current.
7379      *
7380      * Therefore attempt to avoid putting and setting the entire cgroup
7381      * hierarchy, only change the part that actually changes.
7382      */
7383 
7384     do {
7385         struct sched_entity *curr = cfs_rq->curr;
7386 
7387         /*
7388          * Since we got here without doing put_prev_entity() we also
7389          * have to consider cfs_rq->curr. If it is still a runnable
7390          * entity, update_curr() will update its vruntime, otherwise
7391          * forget we've ever seen it.
7392          */
7393         if (curr) {
7394             if (curr->on_rq)
7395                 update_curr(cfs_rq);
7396             else
7397                 curr = NULL;
7398 
7399             /*
7400              * This call to check_cfs_rq_runtime() will do the
7401              * throttle and dequeue its entity in the parent(s).
7402              * Therefore the nr_running test will indeed
7403              * be correct.
7404              */
7405             if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
7406                 cfs_rq = &rq->cfs;
7407 
7408                 if (!cfs_rq->nr_running)
7409                     goto idle;
7410 
7411                 goto simple;
7412             }
7413         }
7414 
7415         se = pick_next_entity(cfs_rq, curr);
7416         cfs_rq = group_cfs_rq(se);
7417     } while (cfs_rq);
7418 
7419     p = task_of(se);
7420 
7421     /*
7422      * Since we haven't yet done put_prev_entity and if the selected task
7423      * is a different task than we started out with, try and touch the
7424      * least amount of cfs_rqs.
7425      */
7426     if (prev != p) {
7427         struct sched_entity *pse = &prev->se;
7428 
7429         while (!(cfs_rq = is_same_group(se, pse))) {
7430             int se_depth = se->depth;
7431             int pse_depth = pse->depth;
7432 
7433             if (se_depth <= pse_depth) {
7434                 put_prev_entity(cfs_rq_of(pse), pse);
7435                 pse = parent_entity(pse);
7436             }
7437             if (se_depth >= pse_depth) {
7438                 set_next_entity(cfs_rq_of(se), se);
7439                 se = parent_entity(se);
7440             }
7441         }
7442 
7443         put_prev_entity(cfs_rq, pse);
7444         set_next_entity(cfs_rq, se);
7445     }
7446 
7447     goto done;
7448 simple:
7449 #endif
7450     if (prev)
7451         put_prev_task(rq, prev);
7452 
7453     do {
7454         se = pick_next_entity(cfs_rq, NULL);
7455         set_next_entity(cfs_rq, se);
7456         cfs_rq = group_cfs_rq(se);
7457     } while (cfs_rq);
7458 
7459     p = task_of(se);
7460 
7461 done: __maybe_unused;
7462 #ifdef CONFIG_SMP
7463     /*
7464      * Move the next running task to the front of
7465      * the list, so our cfs_tasks list becomes MRU
7466      * one.
7467      */
7468     list_move(&p->se.group_node, &rq->cfs_tasks);
7469 #endif
7470 
7471     if (hrtick_enabled_fair(rq))
7472         hrtick_start_fair(rq, p);
7473 
7474     update_misfit_status(p, rq);
7475 
7476     return p;
7477 
7478 idle:
7479     if (!rf)
7480         return NULL;
7481 
7482     new_tasks = newidle_balance(rq, rf);
7483 
7484     /*
7485      * Because newidle_balance() releases (and re-acquires) rq->lock, it is
7486      * possible for any higher priority task to appear. In that case we
7487      * must re-start the pick_next_entity() loop.
7488      */
7489     if (new_tasks < 0)
7490         return RETRY_TASK;
7491 
7492     if (new_tasks > 0)
7493         goto again;
7494 
7495     /*
7496      * rq is about to be idle, check if we need to update the
7497      * lost_idle_time of clock_pelt
7498      */
7499     update_idle_rq_clock_pelt(rq);
7500 
7501     return NULL;
7502 }
7503 
7504 static struct task_struct *__pick_next_task_fair(struct rq *rq)
7505 {
7506     return pick_next_task_fair(rq, NULL, NULL);
7507 }
7508 
7509 /*
7510  * Account for a descheduled task:
7511  */
7512 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
7513 {
7514     struct sched_entity *se = &prev->se;
7515     struct cfs_rq *cfs_rq;
7516 
7517     for_each_sched_entity(se) {
7518         cfs_rq = cfs_rq_of(se);
7519         put_prev_entity(cfs_rq, se);
7520     }
7521 }
7522 
7523 /*
7524  * sched_yield() is very simple
7525  *
7526  * The magic of dealing with the ->skip buddy is in pick_next_entity.
7527  */
7528 static void yield_task_fair(struct rq *rq)
7529 {
7530     struct task_struct *curr = rq->curr;
7531     struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7532     struct sched_entity *se = &curr->se;
7533 
7534     /*
7535      * Are we the only task in the tree?
7536      */
7537     if (unlikely(rq->nr_running == 1))
7538         return;
7539 
7540     clear_buddies(cfs_rq, se);
7541 
7542     if (curr->policy != SCHED_BATCH) {
7543         update_rq_clock(rq);
7544         /*
7545          * Update run-time statistics of the 'current'.
7546          */
7547         update_curr(cfs_rq);
7548         /*
7549          * Tell update_rq_clock() that we've just updated,
7550          * so we don't do microscopic update in schedule()
7551          * and double the fastpath cost.
7552          */
7553         rq_clock_skip_update(rq);
7554     }
7555 
7556     set_skip_buddy(se);
7557 }
7558 
7559 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
7560 {
7561     struct sched_entity *se = &p->se;
7562 
7563     /* throttled hierarchies are not runnable */
7564     if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
7565         return false;
7566 
7567     /* Tell the scheduler that we'd really like pse to run next. */
7568     set_next_buddy(se);
7569 
7570     yield_task_fair(rq);
7571 
7572     return true;
7573 }
7574 
7575 #ifdef CONFIG_SMP
7576 /**************************************************
7577  * Fair scheduling class load-balancing methods.
7578  *
7579  * BASICS
7580  *
7581  * The purpose of load-balancing is to achieve the same basic fairness the
7582  * per-CPU scheduler provides, namely provide a proportional amount of compute
7583  * time to each task. This is expressed in the following equation:
7584  *
7585  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
7586  *
7587  * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
7588  * W_i,0 is defined as:
7589  *
7590  *   W_i,0 = \Sum_j w_i,j                                             (2)
7591  *
7592  * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
7593  * is derived from the nice value as per sched_prio_to_weight[].
7594  *
7595  * The weight average is an exponential decay average of the instantaneous
7596  * weight:
7597  *
7598  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
7599  *
7600  * C_i is the compute capacity of CPU i, typically it is the
7601  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
7602  * can also include other factors [XXX].
7603  *
7604  * To achieve this balance we define a measure of imbalance which follows
7605  * directly from (1):
7606  *
7607  *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
7608  *
7609  * We them move tasks around to minimize the imbalance. In the continuous
7610  * function space it is obvious this converges, in the discrete case we get
7611  * a few fun cases generally called infeasible weight scenarios.
7612  *
7613  * [XXX expand on:
7614  *     - infeasible weights;
7615  *     - local vs global optima in the discrete case. ]
7616  *
7617  *
7618  * SCHED DOMAINS
7619  *
7620  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
7621  * for all i,j solution, we create a tree of CPUs that follows the hardware
7622  * topology where each level pairs two lower groups (or better). This results
7623  * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
7624  * tree to only the first of the previous level and we decrease the frequency
7625  * of load-balance at each level inv. proportional to the number of CPUs in
7626  * the groups.
7627  *
7628  * This yields:
7629  *
7630  *     log_2 n     1     n
7631  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
7632  *     i = 0      2^i   2^i
7633  *                               `- size of each group
7634  *         |         |     `- number of CPUs doing load-balance
7635  *         |         `- freq
7636  *         `- sum over all levels
7637  *
7638  * Coupled with a limit on how many tasks we can migrate every balance pass,
7639  * this makes (5) the runtime complexity of the balancer.
7640  *
7641  * An important property here is that each CPU is still (indirectly) connected
7642  * to every other CPU in at most O(log n) steps:
7643  *
7644  * The adjacency matrix of the resulting graph is given by:
7645  *
7646  *             log_2 n
7647  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
7648  *             k = 0
7649  *
7650  * And you'll find that:
7651  *
7652  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
7653  *
7654  * Showing there's indeed a path between every CPU in at most O(log n) steps.
7655  * The task movement gives a factor of O(m), giving a convergence complexity
7656  * of:
7657  *
7658  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
7659  *
7660  *
7661  * WORK CONSERVING
7662  *
7663  * In order to avoid CPUs going idle while there's still work to do, new idle
7664  * balancing is more aggressive and has the newly idle CPU iterate up the domain
7665  * tree itself instead of relying on other CPUs to bring it work.
7666  *
7667  * This adds some complexity to both (5) and (8) but it reduces the total idle
7668  * time.
7669  *
7670  * [XXX more?]
7671  *
7672  *
7673  * CGROUPS
7674  *
7675  * Cgroups make a horror show out of (2), instead of a simple sum we get:
7676  *
7677  *                                s_k,i
7678  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
7679  *                                 S_k
7680  *
7681  * Where
7682  *
7683  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
7684  *
7685  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
7686  *
7687  * The big problem is S_k, its a global sum needed to compute a local (W_i)
7688  * property.
7689  *
7690  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
7691  *      rewrite all of this once again.]
7692  */
7693 
7694 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
7695 
7696 enum fbq_type { regular, remote, all };
7697 
7698 /*
7699  * 'group_type' describes the group of CPUs at the moment of load balancing.
7700  *
7701  * The enum is ordered by pulling priority, with the group with lowest priority
7702  * first so the group_type can simply be compared when selecting the busiest
7703  * group. See update_sd_pick_busiest().
7704  */
7705 enum group_type {
7706     /* The group has spare capacity that can be used to run more tasks.  */
7707     group_has_spare = 0,
7708     /*
7709      * The group is fully used and the tasks don't compete for more CPU
7710      * cycles. Nevertheless, some tasks might wait before running.
7711      */
7712     group_fully_busy,
7713     /*
7714      * One task doesn't fit with CPU's capacity and must be migrated to a
7715      * more powerful CPU.
7716      */
7717     group_misfit_task,
7718     /*
7719      * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
7720      * and the task should be migrated to it instead of running on the
7721      * current CPU.
7722      */
7723     group_asym_packing,
7724     /*
7725      * The tasks' affinity constraints previously prevented the scheduler
7726      * from balancing the load across the system.
7727      */
7728     group_imbalanced,
7729     /*
7730      * The CPU is overloaded and can't provide expected CPU cycles to all
7731      * tasks.
7732      */
7733     group_overloaded
7734 };
7735 
7736 enum migration_type {
7737     migrate_load = 0,
7738     migrate_util,
7739     migrate_task,
7740     migrate_misfit
7741 };
7742 
7743 #define LBF_ALL_PINNED  0x01
7744 #define LBF_NEED_BREAK  0x02
7745 #define LBF_DST_PINNED  0x04
7746 #define LBF_SOME_PINNED 0x08
7747 #define LBF_ACTIVE_LB   0x10
7748 
7749 struct lb_env {
7750     struct sched_domain *sd;
7751 
7752     struct rq       *src_rq;
7753     int         src_cpu;
7754 
7755     int         dst_cpu;
7756     struct rq       *dst_rq;
7757 
7758     struct cpumask      *dst_grpmask;
7759     int         new_dst_cpu;
7760     enum cpu_idle_type  idle;
7761     long            imbalance;
7762     /* The set of CPUs under consideration for load-balancing */
7763     struct cpumask      *cpus;
7764 
7765     unsigned int        flags;
7766 
7767     unsigned int        loop;
7768     unsigned int        loop_break;
7769     unsigned int        loop_max;
7770 
7771     enum fbq_type       fbq_type;
7772     enum migration_type migration_type;
7773     struct list_head    tasks;
7774 };
7775 
7776 /*
7777  * Is this task likely cache-hot:
7778  */
7779 static int task_hot(struct task_struct *p, struct lb_env *env)
7780 {
7781     s64 delta;
7782 
7783     lockdep_assert_rq_held(env->src_rq);
7784 
7785     if (p->sched_class != &fair_sched_class)
7786         return 0;
7787 
7788     if (unlikely(task_has_idle_policy(p)))
7789         return 0;
7790 
7791     /* SMT siblings share cache */
7792     if (env->sd->flags & SD_SHARE_CPUCAPACITY)
7793         return 0;
7794 
7795     /*
7796      * Buddy candidates are cache hot:
7797      */
7798     if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
7799             (&p->se == cfs_rq_of(&p->se)->next ||
7800              &p->se == cfs_rq_of(&p->se)->last))
7801         return 1;
7802 
7803     if (sysctl_sched_migration_cost == -1)
7804         return 1;
7805 
7806     /*
7807      * Don't migrate task if the task's cookie does not match
7808      * with the destination CPU's core cookie.
7809      */
7810     if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
7811         return 1;
7812 
7813     if (sysctl_sched_migration_cost == 0)
7814         return 0;
7815 
7816     delta = rq_clock_task(env->src_rq) - p->se.exec_start;
7817 
7818     return delta < (s64)sysctl_sched_migration_cost;
7819 }
7820 
7821 #ifdef CONFIG_NUMA_BALANCING
7822 /*
7823  * Returns 1, if task migration degrades locality
7824  * Returns 0, if task migration improves locality i.e migration preferred.
7825  * Returns -1, if task migration is not affected by locality.
7826  */
7827 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
7828 {
7829     struct numa_group *numa_group = rcu_dereference(p->numa_group);
7830     unsigned long src_weight, dst_weight;
7831     int src_nid, dst_nid, dist;
7832 
7833     if (!static_branch_likely(&sched_numa_balancing))
7834         return -1;
7835 
7836     if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
7837         return -1;
7838 
7839     src_nid = cpu_to_node(env->src_cpu);
7840     dst_nid = cpu_to_node(env->dst_cpu);
7841 
7842     if (src_nid == dst_nid)
7843         return -1;
7844 
7845     /* Migrating away from the preferred node is always bad. */
7846     if (src_nid == p->numa_preferred_nid) {
7847         if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
7848             return 1;
7849         else
7850             return -1;
7851     }
7852 
7853     /* Encourage migration to the preferred node. */
7854     if (dst_nid == p->numa_preferred_nid)
7855         return 0;
7856 
7857     /* Leaving a core idle is often worse than degrading locality. */
7858     if (env->idle == CPU_IDLE)
7859         return -1;
7860 
7861     dist = node_distance(src_nid, dst_nid);
7862     if (numa_group) {
7863         src_weight = group_weight(p, src_nid, dist);
7864         dst_weight = group_weight(p, dst_nid, dist);
7865     } else {
7866         src_weight = task_weight(p, src_nid, dist);
7867         dst_weight = task_weight(p, dst_nid, dist);
7868     }
7869 
7870     return dst_weight < src_weight;
7871 }
7872 
7873 #else
7874 static inline int migrate_degrades_locality(struct task_struct *p,
7875                          struct lb_env *env)
7876 {
7877     return -1;
7878 }
7879 #endif
7880 
7881 /*
7882  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
7883  */
7884 static
7885 int can_migrate_task(struct task_struct *p, struct lb_env *env)
7886 {
7887     int tsk_cache_hot;
7888 
7889     lockdep_assert_rq_held(env->src_rq);
7890 
7891     /*
7892      * We do not migrate tasks that are:
7893      * 1) throttled_lb_pair, or
7894      * 2) cannot be migrated to this CPU due to cpus_ptr, or
7895      * 3) running (obviously), or
7896      * 4) are cache-hot on their current CPU.
7897      */
7898     if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
7899         return 0;
7900 
7901     /* Disregard pcpu kthreads; they are where they need to be. */
7902     if (kthread_is_per_cpu(p))
7903         return 0;
7904 
7905     if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
7906         int cpu;
7907 
7908         schedstat_inc(p->stats.nr_failed_migrations_affine);
7909 
7910         env->flags |= LBF_SOME_PINNED;
7911 
7912         /*
7913          * Remember if this task can be migrated to any other CPU in
7914          * our sched_group. We may want to revisit it if we couldn't
7915          * meet load balance goals by pulling other tasks on src_cpu.
7916          *
7917          * Avoid computing new_dst_cpu
7918          * - for NEWLY_IDLE
7919          * - if we have already computed one in current iteration
7920          * - if it's an active balance
7921          */
7922         if (env->idle == CPU_NEWLY_IDLE ||
7923             env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
7924             return 0;
7925 
7926         /* Prevent to re-select dst_cpu via env's CPUs: */
7927         for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
7928             if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
7929                 env->flags |= LBF_DST_PINNED;
7930                 env->new_dst_cpu = cpu;
7931                 break;
7932             }
7933         }
7934 
7935         return 0;
7936     }
7937 
7938     /* Record that we found at least one task that could run on dst_cpu */
7939     env->flags &= ~LBF_ALL_PINNED;
7940 
7941     if (task_running(env->src_rq, p)) {
7942         schedstat_inc(p->stats.nr_failed_migrations_running);
7943         return 0;
7944     }
7945 
7946     /*
7947      * Aggressive migration if:
7948      * 1) active balance
7949      * 2) destination numa is preferred
7950      * 3) task is cache cold, or
7951      * 4) too many balance attempts have failed.
7952      */
7953     if (env->flags & LBF_ACTIVE_LB)
7954         return 1;
7955 
7956     tsk_cache_hot = migrate_degrades_locality(p, env);
7957     if (tsk_cache_hot == -1)
7958         tsk_cache_hot = task_hot(p, env);
7959 
7960     if (tsk_cache_hot <= 0 ||
7961         env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
7962         if (tsk_cache_hot == 1) {
7963             schedstat_inc(env->sd->lb_hot_gained[env->idle]);
7964             schedstat_inc(p->stats.nr_forced_migrations);
7965         }
7966         return 1;
7967     }
7968 
7969     schedstat_inc(p->stats.nr_failed_migrations_hot);
7970     return 0;
7971 }
7972 
7973 /*
7974  * detach_task() -- detach the task for the migration specified in env
7975  */
7976 static void detach_task(struct task_struct *p, struct lb_env *env)
7977 {
7978     lockdep_assert_rq_held(env->src_rq);
7979 
7980     deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
7981     set_task_cpu(p, env->dst_cpu);
7982 }
7983 
7984 /*
7985  * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
7986  * part of active balancing operations within "domain".
7987  *
7988  * Returns a task if successful and NULL otherwise.
7989  */
7990 static struct task_struct *detach_one_task(struct lb_env *env)
7991 {
7992     struct task_struct *p;
7993 
7994     lockdep_assert_rq_held(env->src_rq);
7995 
7996     list_for_each_entry_reverse(p,
7997             &env->src_rq->cfs_tasks, se.group_node) {
7998         if (!can_migrate_task(p, env))
7999             continue;
8000 
8001         detach_task(p, env);
8002 
8003         /*
8004          * Right now, this is only the second place where
8005          * lb_gained[env->idle] is updated (other is detach_tasks)
8006          * so we can safely collect stats here rather than
8007          * inside detach_tasks().
8008          */
8009         schedstat_inc(env->sd->lb_gained[env->idle]);
8010         return p;
8011     }
8012     return NULL;
8013 }
8014 
8015 static const unsigned int sched_nr_migrate_break = 32;
8016 
8017 /*
8018  * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
8019  * busiest_rq, as part of a balancing operation within domain "sd".
8020  *
8021  * Returns number of detached tasks if successful and 0 otherwise.
8022  */
8023 static int detach_tasks(struct lb_env *env)
8024 {
8025     struct list_head *tasks = &env->src_rq->cfs_tasks;
8026     unsigned long util, load;
8027     struct task_struct *p;
8028     int detached = 0;
8029 
8030     lockdep_assert_rq_held(env->src_rq);
8031 
8032     /*
8033      * Source run queue has been emptied by another CPU, clear
8034      * LBF_ALL_PINNED flag as we will not test any task.
8035      */
8036     if (env->src_rq->nr_running <= 1) {
8037         env->flags &= ~LBF_ALL_PINNED;
8038         return 0;
8039     }
8040 
8041     if (env->imbalance <= 0)
8042         return 0;
8043 
8044     while (!list_empty(tasks)) {
8045         /*
8046          * We don't want to steal all, otherwise we may be treated likewise,
8047          * which could at worst lead to a livelock crash.
8048          */
8049         if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
8050             break;
8051 
8052         p = list_last_entry(tasks, struct task_struct, se.group_node);
8053 
8054         env->loop++;
8055         /* We've more or less seen every task there is, call it quits */
8056         if (env->loop > env->loop_max)
8057             break;
8058 
8059         /* take a breather every nr_migrate tasks */
8060         if (env->loop > env->loop_break) {
8061             env->loop_break += sched_nr_migrate_break;
8062             env->flags |= LBF_NEED_BREAK;
8063             break;
8064         }
8065 
8066         if (!can_migrate_task(p, env))
8067             goto next;
8068 
8069         switch (env->migration_type) {
8070         case migrate_load:
8071             /*
8072              * Depending of the number of CPUs and tasks and the
8073              * cgroup hierarchy, task_h_load() can return a null
8074              * value. Make sure that env->imbalance decreases
8075              * otherwise detach_tasks() will stop only after
8076              * detaching up to loop_max tasks.
8077              */
8078             load = max_t(unsigned long, task_h_load(p), 1);
8079 
8080             if (sched_feat(LB_MIN) &&
8081                 load < 16 && !env->sd->nr_balance_failed)
8082                 goto next;
8083 
8084             /*
8085              * Make sure that we don't migrate too much load.
8086              * Nevertheless, let relax the constraint if
8087              * scheduler fails to find a good waiting task to
8088              * migrate.
8089              */
8090             if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
8091                 goto next;
8092 
8093             env->imbalance -= load;
8094             break;
8095 
8096         case migrate_util:
8097             util = task_util_est(p);
8098 
8099             if (util > env->imbalance)
8100                 goto next;
8101 
8102             env->imbalance -= util;
8103             break;
8104 
8105         case migrate_task:
8106             env->imbalance--;
8107             break;
8108 
8109         case migrate_misfit:
8110             /* This is not a misfit task */
8111             if (task_fits_capacity(p, capacity_of(env->src_cpu)))
8112                 goto next;
8113 
8114             env->imbalance = 0;
8115             break;
8116         }
8117 
8118         detach_task(p, env);
8119         list_add(&p->se.group_node, &env->tasks);
8120 
8121         detached++;
8122 
8123 #ifdef CONFIG_PREEMPTION
8124         /*
8125          * NEWIDLE balancing is a source of latency, so preemptible
8126          * kernels will stop after the first task is detached to minimize
8127          * the critical section.
8128          */
8129         if (env->idle == CPU_NEWLY_IDLE)
8130             break;
8131 #endif
8132 
8133         /*
8134          * We only want to steal up to the prescribed amount of
8135          * load/util/tasks.
8136          */
8137         if (env->imbalance <= 0)
8138             break;
8139 
8140         continue;
8141 next:
8142         list_move(&p->se.group_node, tasks);
8143     }
8144 
8145     /*
8146      * Right now, this is one of only two places we collect this stat
8147      * so we can safely collect detach_one_task() stats here rather
8148      * than inside detach_one_task().
8149      */
8150     schedstat_add(env->sd->lb_gained[env->idle], detached);
8151 
8152     return detached;
8153 }
8154 
8155 /*
8156  * attach_task() -- attach the task detached by detach_task() to its new rq.
8157  */
8158 static void attach_task(struct rq *rq, struct task_struct *p)
8159 {
8160     lockdep_assert_rq_held(rq);
8161 
8162     BUG_ON(task_rq(p) != rq);
8163     activate_task(rq, p, ENQUEUE_NOCLOCK);
8164     check_preempt_curr(rq, p, 0);
8165 }
8166 
8167 /*
8168  * attach_one_task() -- attaches the task returned from detach_one_task() to
8169  * its new rq.
8170  */
8171 static void attach_one_task(struct rq *rq, struct task_struct *p)
8172 {
8173     struct rq_flags rf;
8174 
8175     rq_lock(rq, &rf);
8176     update_rq_clock(rq);
8177     attach_task(rq, p);
8178     rq_unlock(rq, &rf);
8179 }
8180 
8181 /*
8182  * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
8183  * new rq.
8184  */
8185 static void attach_tasks(struct lb_env *env)
8186 {
8187     struct list_head *tasks = &env->tasks;
8188     struct task_struct *p;
8189     struct rq_flags rf;
8190 
8191     rq_lock(env->dst_rq, &rf);
8192     update_rq_clock(env->dst_rq);
8193 
8194     while (!list_empty(tasks)) {
8195         p = list_first_entry(tasks, struct task_struct, se.group_node);
8196         list_del_init(&p->se.group_node);
8197 
8198         attach_task(env->dst_rq, p);
8199     }
8200 
8201     rq_unlock(env->dst_rq, &rf);
8202 }
8203 
8204 #ifdef CONFIG_NO_HZ_COMMON
8205 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
8206 {
8207     if (cfs_rq->avg.load_avg)
8208         return true;
8209 
8210     if (cfs_rq->avg.util_avg)
8211         return true;
8212 
8213     return false;
8214 }
8215 
8216 static inline bool others_have_blocked(struct rq *rq)
8217 {
8218     if (READ_ONCE(rq->avg_rt.util_avg))
8219         return true;
8220 
8221     if (READ_ONCE(rq->avg_dl.util_avg))
8222         return true;
8223 
8224     if (thermal_load_avg(rq))
8225         return true;
8226 
8227 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
8228     if (READ_ONCE(rq->avg_irq.util_avg))
8229         return true;
8230 #endif
8231 
8232     return false;
8233 }
8234 
8235 static inline void update_blocked_load_tick(struct rq *rq)
8236 {
8237     WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
8238 }
8239 
8240 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
8241 {
8242     if (!has_blocked)
8243         rq->has_blocked_load = 0;
8244 }
8245 #else
8246 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
8247 static inline bool others_have_blocked(struct rq *rq) { return false; }
8248 static inline void update_blocked_load_tick(struct rq *rq) {}
8249 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
8250 #endif
8251 
8252 static bool __update_blocked_others(struct rq *rq, bool *done)
8253 {
8254     const struct sched_class *curr_class;
8255     u64 now = rq_clock_pelt(rq);
8256     unsigned long thermal_pressure;
8257     bool decayed;
8258 
8259     /*
8260      * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
8261      * DL and IRQ signals have been updated before updating CFS.
8262      */
8263     curr_class = rq->curr->sched_class;
8264 
8265     thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
8266 
8267     decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
8268           update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
8269           update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
8270           update_irq_load_avg(rq, 0);
8271 
8272     if (others_have_blocked(rq))
8273         *done = false;
8274 
8275     return decayed;
8276 }
8277 
8278 #ifdef CONFIG_FAIR_GROUP_SCHED
8279 
8280 static bool __update_blocked_fair(struct rq *rq, bool *done)
8281 {
8282     struct cfs_rq *cfs_rq, *pos;
8283     bool decayed = false;
8284     int cpu = cpu_of(rq);
8285 
8286     /*
8287      * Iterates the task_group tree in a bottom up fashion, see
8288      * list_add_leaf_cfs_rq() for details.
8289      */
8290     for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
8291         struct sched_entity *se;
8292 
8293         if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
8294             update_tg_load_avg(cfs_rq);
8295 
8296             if (cfs_rq->nr_running == 0)
8297                 update_idle_cfs_rq_clock_pelt(cfs_rq);
8298 
8299             if (cfs_rq == &rq->cfs)
8300                 decayed = true;
8301         }
8302 
8303         /* Propagate pending load changes to the parent, if any: */
8304         se = cfs_rq->tg->se[cpu];
8305         if (se && !skip_blocked_update(se))
8306             update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
8307 
8308         /*
8309          * There can be a lot of idle CPU cgroups.  Don't let fully
8310          * decayed cfs_rqs linger on the list.
8311          */
8312         if (cfs_rq_is_decayed(cfs_rq))
8313             list_del_leaf_cfs_rq(cfs_rq);
8314 
8315         /* Don't need periodic decay once load/util_avg are null */
8316         if (cfs_rq_has_blocked(cfs_rq))
8317             *done = false;
8318     }
8319 
8320     return decayed;
8321 }
8322 
8323 /*
8324  * Compute the hierarchical load factor for cfs_rq and all its ascendants.
8325  * This needs to be done in a top-down fashion because the load of a child
8326  * group is a fraction of its parents load.
8327  */
8328 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
8329 {
8330     struct rq *rq = rq_of(cfs_rq);
8331     struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
8332     unsigned long now = jiffies;
8333     unsigned long load;
8334 
8335     if (cfs_rq->last_h_load_update == now)
8336         return;
8337 
8338     WRITE_ONCE(cfs_rq->h_load_next, NULL);
8339     for_each_sched_entity(se) {
8340         cfs_rq = cfs_rq_of(se);
8341         WRITE_ONCE(cfs_rq->h_load_next, se);
8342         if (cfs_rq->last_h_load_update == now)
8343             break;
8344     }
8345 
8346     if (!se) {
8347         cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
8348         cfs_rq->last_h_load_update = now;
8349     }
8350 
8351     while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
8352         load = cfs_rq->h_load;
8353         load = div64_ul(load * se->avg.load_avg,
8354             cfs_rq_load_avg(cfs_rq) + 1);
8355         cfs_rq = group_cfs_rq(se);
8356         cfs_rq->h_load = load;
8357         cfs_rq->last_h_load_update = now;
8358     }
8359 }
8360 
8361 static unsigned long task_h_load(struct task_struct *p)
8362 {
8363     struct cfs_rq *cfs_rq = task_cfs_rq(p);
8364 
8365     update_cfs_rq_h_load(cfs_rq);
8366     return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
8367             cfs_rq_load_avg(cfs_rq) + 1);
8368 }
8369 #else
8370 static bool __update_blocked_fair(struct rq *rq, bool *done)
8371 {
8372     struct cfs_rq *cfs_rq = &rq->cfs;
8373     bool decayed;
8374 
8375     decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
8376     if (cfs_rq_has_blocked(cfs_rq))
8377         *done = false;
8378 
8379     return decayed;
8380 }
8381 
8382 static unsigned long task_h_load(struct task_struct *p)
8383 {
8384     return p->se.avg.load_avg;
8385 }
8386 #endif
8387 
8388 static void update_blocked_averages(int cpu)
8389 {
8390     bool decayed = false, done = true;
8391     struct rq *rq = cpu_rq(cpu);
8392     struct rq_flags rf;
8393 
8394     rq_lock_irqsave(rq, &rf);
8395     update_blocked_load_tick(rq);
8396     update_rq_clock(rq);
8397 
8398     decayed |= __update_blocked_others(rq, &done);
8399     decayed |= __update_blocked_fair(rq, &done);
8400 
8401     update_blocked_load_status(rq, !done);
8402     if (decayed)
8403         cpufreq_update_util(rq, 0);
8404     rq_unlock_irqrestore(rq, &rf);
8405 }
8406 
8407 /********** Helpers for find_busiest_group ************************/
8408 
8409 /*
8410  * sg_lb_stats - stats of a sched_group required for load_balancing
8411  */
8412 struct sg_lb_stats {
8413     unsigned long avg_load; /*Avg load across the CPUs of the group */
8414     unsigned long group_load; /* Total load over the CPUs of the group */
8415     unsigned long group_capacity;
8416     unsigned long group_util; /* Total utilization over the CPUs of the group */
8417     unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
8418     unsigned int sum_nr_running; /* Nr of tasks running in the group */
8419     unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
8420     unsigned int idle_cpus;
8421     unsigned int group_weight;
8422     enum group_type group_type;
8423     unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
8424     unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
8425 #ifdef CONFIG_NUMA_BALANCING
8426     unsigned int nr_numa_running;
8427     unsigned int nr_preferred_running;
8428 #endif
8429 };
8430 
8431 /*
8432  * sd_lb_stats - Structure to store the statistics of a sched_domain
8433  *       during load balancing.
8434  */
8435 struct sd_lb_stats {
8436     struct sched_group *busiest;    /* Busiest group in this sd */
8437     struct sched_group *local;  /* Local group in this sd */
8438     unsigned long total_load;   /* Total load of all groups in sd */
8439     unsigned long total_capacity;   /* Total capacity of all groups in sd */
8440     unsigned long avg_load; /* Average load across all groups in sd */
8441     unsigned int prefer_sibling; /* tasks should go to sibling first */
8442 
8443     struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
8444     struct sg_lb_stats local_stat;  /* Statistics of the local group */
8445 };
8446 
8447 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
8448 {
8449     /*
8450      * Skimp on the clearing to avoid duplicate work. We can avoid clearing
8451      * local_stat because update_sg_lb_stats() does a full clear/assignment.
8452      * We must however set busiest_stat::group_type and
8453      * busiest_stat::idle_cpus to the worst busiest group because
8454      * update_sd_pick_busiest() reads these before assignment.
8455      */
8456     *sds = (struct sd_lb_stats){
8457         .busiest = NULL,
8458         .local = NULL,
8459         .total_load = 0UL,
8460         .total_capacity = 0UL,
8461         .busiest_stat = {
8462             .idle_cpus = UINT_MAX,
8463             .group_type = group_has_spare,
8464         },
8465     };
8466 }
8467 
8468 static unsigned long scale_rt_capacity(int cpu)
8469 {
8470     struct rq *rq = cpu_rq(cpu);
8471     unsigned long max = arch_scale_cpu_capacity(cpu);
8472     unsigned long used, free;
8473     unsigned long irq;
8474 
8475     irq = cpu_util_irq(rq);
8476 
8477     if (unlikely(irq >= max))
8478         return 1;
8479 
8480     /*
8481      * avg_rt.util_avg and avg_dl.util_avg track binary signals
8482      * (running and not running) with weights 0 and 1024 respectively.
8483      * avg_thermal.load_avg tracks thermal pressure and the weighted
8484      * average uses the actual delta max capacity(load).
8485      */
8486     used = READ_ONCE(rq->avg_rt.util_avg);
8487     used += READ_ONCE(rq->avg_dl.util_avg);
8488     used += thermal_load_avg(rq);
8489 
8490     if (unlikely(used >= max))
8491         return 1;
8492 
8493     free = max - used;
8494 
8495     return scale_irq_capacity(free, irq, max);
8496 }
8497 
8498 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
8499 {
8500     unsigned long capacity = scale_rt_capacity(cpu);
8501     struct sched_group *sdg = sd->groups;
8502 
8503     cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
8504 
8505     if (!capacity)
8506         capacity = 1;
8507 
8508     cpu_rq(cpu)->cpu_capacity = capacity;
8509     trace_sched_cpu_capacity_tp(cpu_rq(cpu));
8510 
8511     sdg->sgc->capacity = capacity;
8512     sdg->sgc->min_capacity = capacity;
8513     sdg->sgc->max_capacity = capacity;
8514 }
8515 
8516 void update_group_capacity(struct sched_domain *sd, int cpu)
8517 {
8518     struct sched_domain *child = sd->child;
8519     struct sched_group *group, *sdg = sd->groups;
8520     unsigned long capacity, min_capacity, max_capacity;
8521     unsigned long interval;
8522 
8523     interval = msecs_to_jiffies(sd->balance_interval);
8524     interval = clamp(interval, 1UL, max_load_balance_interval);
8525     sdg->sgc->next_update = jiffies + interval;
8526 
8527     if (!child) {
8528         update_cpu_capacity(sd, cpu);
8529         return;
8530     }
8531 
8532     capacity = 0;
8533     min_capacity = ULONG_MAX;
8534     max_capacity = 0;
8535 
8536     if (child->flags & SD_OVERLAP) {
8537         /*
8538          * SD_OVERLAP domains cannot assume that child groups
8539          * span the current group.
8540          */
8541 
8542         for_each_cpu(cpu, sched_group_span(sdg)) {
8543             unsigned long cpu_cap = capacity_of(cpu);
8544 
8545             capacity += cpu_cap;
8546             min_capacity = min(cpu_cap, min_capacity);
8547             max_capacity = max(cpu_cap, max_capacity);
8548         }
8549     } else  {
8550         /*
8551          * !SD_OVERLAP domains can assume that child groups
8552          * span the current group.
8553          */
8554 
8555         group = child->groups;
8556         do {
8557             struct sched_group_capacity *sgc = group->sgc;
8558 
8559             capacity += sgc->capacity;
8560             min_capacity = min(sgc->min_capacity, min_capacity);
8561             max_capacity = max(sgc->max_capacity, max_capacity);
8562             group = group->next;
8563         } while (group != child->groups);
8564     }
8565 
8566     sdg->sgc->capacity = capacity;
8567     sdg->sgc->min_capacity = min_capacity;
8568     sdg->sgc->max_capacity = max_capacity;
8569 }
8570 
8571 /*
8572  * Check whether the capacity of the rq has been noticeably reduced by side
8573  * activity. The imbalance_pct is used for the threshold.
8574  * Return true is the capacity is reduced
8575  */
8576 static inline int
8577 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
8578 {
8579     return ((rq->cpu_capacity * sd->imbalance_pct) <
8580                 (rq->cpu_capacity_orig * 100));
8581 }
8582 
8583 /*
8584  * Check whether a rq has a misfit task and if it looks like we can actually
8585  * help that task: we can migrate the task to a CPU of higher capacity, or
8586  * the task's current CPU is heavily pressured.
8587  */
8588 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
8589 {
8590     return rq->misfit_task_load &&
8591         (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
8592          check_cpu_capacity(rq, sd));
8593 }
8594 
8595 /*
8596  * Group imbalance indicates (and tries to solve) the problem where balancing
8597  * groups is inadequate due to ->cpus_ptr constraints.
8598  *
8599  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
8600  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
8601  * Something like:
8602  *
8603  *  { 0 1 2 3 } { 4 5 6 7 }
8604  *          *     * * *
8605  *
8606  * If we were to balance group-wise we'd place two tasks in the first group and
8607  * two tasks in the second group. Clearly this is undesired as it will overload
8608  * cpu 3 and leave one of the CPUs in the second group unused.
8609  *
8610  * The current solution to this issue is detecting the skew in the first group
8611  * by noticing the lower domain failed to reach balance and had difficulty
8612  * moving tasks due to affinity constraints.
8613  *
8614  * When this is so detected; this group becomes a candidate for busiest; see
8615  * update_sd_pick_busiest(). And calculate_imbalance() and
8616  * find_busiest_group() avoid some of the usual balance conditions to allow it
8617  * to create an effective group imbalance.
8618  *
8619  * This is a somewhat tricky proposition since the next run might not find the
8620  * group imbalance and decide the groups need to be balanced again. A most
8621  * subtle and fragile situation.
8622  */
8623 
8624 static inline int sg_imbalanced(struct sched_group *group)
8625 {
8626     return group->sgc->imbalance;
8627 }
8628 
8629 /*
8630  * group_has_capacity returns true if the group has spare capacity that could
8631  * be used by some tasks.
8632  * We consider that a group has spare capacity if the number of task is
8633  * smaller than the number of CPUs or if the utilization is lower than the
8634  * available capacity for CFS tasks.
8635  * For the latter, we use a threshold to stabilize the state, to take into
8636  * account the variance of the tasks' load and to return true if the available
8637  * capacity in meaningful for the load balancer.
8638  * As an example, an available capacity of 1% can appear but it doesn't make
8639  * any benefit for the load balance.
8640  */
8641 static inline bool
8642 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8643 {
8644     if (sgs->sum_nr_running < sgs->group_weight)
8645         return true;
8646 
8647     if ((sgs->group_capacity * imbalance_pct) <
8648             (sgs->group_runnable * 100))
8649         return false;
8650 
8651     if ((sgs->group_capacity * 100) >
8652             (sgs->group_util * imbalance_pct))
8653         return true;
8654 
8655     return false;
8656 }
8657 
8658 /*
8659  *  group_is_overloaded returns true if the group has more tasks than it can
8660  *  handle.
8661  *  group_is_overloaded is not equals to !group_has_capacity because a group
8662  *  with the exact right number of tasks, has no more spare capacity but is not
8663  *  overloaded so both group_has_capacity and group_is_overloaded return
8664  *  false.
8665  */
8666 static inline bool
8667 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8668 {
8669     if (sgs->sum_nr_running <= sgs->group_weight)
8670         return false;
8671 
8672     if ((sgs->group_capacity * 100) <
8673             (sgs->group_util * imbalance_pct))
8674         return true;
8675 
8676     if ((sgs->group_capacity * imbalance_pct) <
8677             (sgs->group_runnable * 100))
8678         return true;
8679 
8680     return false;
8681 }
8682 
8683 static inline enum
8684 group_type group_classify(unsigned int imbalance_pct,
8685               struct sched_group *group,
8686               struct sg_lb_stats *sgs)
8687 {
8688     if (group_is_overloaded(imbalance_pct, sgs))
8689         return group_overloaded;
8690 
8691     if (sg_imbalanced(group))
8692         return group_imbalanced;
8693 
8694     if (sgs->group_asym_packing)
8695         return group_asym_packing;
8696 
8697     if (sgs->group_misfit_task_load)
8698         return group_misfit_task;
8699 
8700     if (!group_has_capacity(imbalance_pct, sgs))
8701         return group_fully_busy;
8702 
8703     return group_has_spare;
8704 }
8705 
8706 /**
8707  * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks
8708  * @dst_cpu:    Destination CPU of the load balancing
8709  * @sds:    Load-balancing data with statistics of the local group
8710  * @sgs:    Load-balancing statistics of the candidate busiest group
8711  * @sg:     The candidate busiest group
8712  *
8713  * Check the state of the SMT siblings of both @sds::local and @sg and decide
8714  * if @dst_cpu can pull tasks.
8715  *
8716  * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of
8717  * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks
8718  * only if @dst_cpu has higher priority.
8719  *
8720  * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more
8721  * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority.
8722  * Bigger imbalances in the number of busy CPUs will be dealt with in
8723  * update_sd_pick_busiest().
8724  *
8725  * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
8726  * of @dst_cpu are idle and @sg has lower priority.
8727  *
8728  * Return: true if @dst_cpu can pull tasks, false otherwise.
8729  */
8730 static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
8731                     struct sg_lb_stats *sgs,
8732                     struct sched_group *sg)
8733 {
8734 #ifdef CONFIG_SCHED_SMT
8735     bool local_is_smt, sg_is_smt;
8736     int sg_busy_cpus;
8737 
8738     local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY;
8739     sg_is_smt = sg->flags & SD_SHARE_CPUCAPACITY;
8740 
8741     sg_busy_cpus = sgs->group_weight - sgs->idle_cpus;
8742 
8743     if (!local_is_smt) {
8744         /*
8745          * If we are here, @dst_cpu is idle and does not have SMT
8746          * siblings. Pull tasks if candidate group has two or more
8747          * busy CPUs.
8748          */
8749         if (sg_busy_cpus >= 2) /* implies sg_is_smt */
8750             return true;
8751 
8752         /*
8753          * @dst_cpu does not have SMT siblings. @sg may have SMT
8754          * siblings and only one is busy. In such case, @dst_cpu
8755          * can help if it has higher priority and is idle (i.e.,
8756          * it has no running tasks).
8757          */
8758         return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
8759     }
8760 
8761     /* @dst_cpu has SMT siblings. */
8762 
8763     if (sg_is_smt) {
8764         int local_busy_cpus = sds->local->group_weight -
8765                       sds->local_stat.idle_cpus;
8766         int busy_cpus_delta = sg_busy_cpus - local_busy_cpus;
8767 
8768         if (busy_cpus_delta == 1)
8769             return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
8770 
8771         return false;
8772     }
8773 
8774     /*
8775      * @sg does not have SMT siblings. Ensure that @sds::local does not end
8776      * up with more than one busy SMT sibling and only pull tasks if there
8777      * are not busy CPUs (i.e., no CPU has running tasks).
8778      */
8779     if (!sds->local_stat.sum_nr_running)
8780         return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
8781 
8782     return false;
8783 #else
8784     /* Always return false so that callers deal with non-SMT cases. */
8785     return false;
8786 #endif
8787 }
8788 
8789 static inline bool
8790 sched_asym(struct lb_env *env, struct sd_lb_stats *sds,  struct sg_lb_stats *sgs,
8791        struct sched_group *group)
8792 {
8793     /* Only do SMT checks if either local or candidate have SMT siblings */
8794     if ((sds->local->flags & SD_SHARE_CPUCAPACITY) ||
8795         (group->flags & SD_SHARE_CPUCAPACITY))
8796         return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group);
8797 
8798     return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
8799 }
8800 
8801 static inline bool
8802 sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
8803 {
8804     /*
8805      * When there is more than 1 task, the group_overloaded case already
8806      * takes care of cpu with reduced capacity
8807      */
8808     if (rq->cfs.h_nr_running != 1)
8809         return false;
8810 
8811     return check_cpu_capacity(rq, sd);
8812 }
8813 
8814 /**
8815  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
8816  * @env: The load balancing environment.
8817  * @sds: Load-balancing data with statistics of the local group.
8818  * @group: sched_group whose statistics are to be updated.
8819  * @sgs: variable to hold the statistics for this group.
8820  * @sg_status: Holds flag indicating the status of the sched_group
8821  */
8822 static inline void update_sg_lb_stats(struct lb_env *env,
8823                       struct sd_lb_stats *sds,
8824                       struct sched_group *group,
8825                       struct sg_lb_stats *sgs,
8826                       int *sg_status)
8827 {
8828     int i, nr_running, local_group;
8829 
8830     memset(sgs, 0, sizeof(*sgs));
8831 
8832     local_group = group == sds->local;
8833 
8834     for_each_cpu_and(i, sched_group_span(group), env->cpus) {
8835         struct rq *rq = cpu_rq(i);
8836         unsigned long load = cpu_load(rq);
8837 
8838         sgs->group_load += load;
8839         sgs->group_util += cpu_util_cfs(i);
8840         sgs->group_runnable += cpu_runnable(rq);
8841         sgs->sum_h_nr_running += rq->cfs.h_nr_running;
8842 
8843         nr_running = rq->nr_running;
8844         sgs->sum_nr_running += nr_running;
8845 
8846         if (nr_running > 1)
8847             *sg_status |= SG_OVERLOAD;
8848 
8849         if (cpu_overutilized(i))
8850             *sg_status |= SG_OVERUTILIZED;
8851 
8852 #ifdef CONFIG_NUMA_BALANCING
8853         sgs->nr_numa_running += rq->nr_numa_running;
8854         sgs->nr_preferred_running += rq->nr_preferred_running;
8855 #endif
8856         /*
8857          * No need to call idle_cpu() if nr_running is not 0
8858          */
8859         if (!nr_running && idle_cpu(i)) {
8860             sgs->idle_cpus++;
8861             /* Idle cpu can't have misfit task */
8862             continue;
8863         }
8864 
8865         if (local_group)
8866             continue;
8867 
8868         if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
8869             /* Check for a misfit task on the cpu */
8870             if (sgs->group_misfit_task_load < rq->misfit_task_load) {
8871                 sgs->group_misfit_task_load = rq->misfit_task_load;
8872                 *sg_status |= SG_OVERLOAD;
8873             }
8874         } else if ((env->idle != CPU_NOT_IDLE) &&
8875                sched_reduced_capacity(rq, env->sd)) {
8876             /* Check for a task running on a CPU with reduced capacity */
8877             if (sgs->group_misfit_task_load < load)
8878                 sgs->group_misfit_task_load = load;
8879         }
8880     }
8881 
8882     sgs->group_capacity = group->sgc->capacity;
8883 
8884     sgs->group_weight = group->group_weight;
8885 
8886     /* Check if dst CPU is idle and preferred to this group */
8887     if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
8888         env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
8889         sched_asym(env, sds, sgs, group)) {
8890         sgs->group_asym_packing = 1;
8891     }
8892 
8893     sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
8894 
8895     /* Computing avg_load makes sense only when group is overloaded */
8896     if (sgs->group_type == group_overloaded)
8897         sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
8898                 sgs->group_capacity;
8899 }
8900 
8901 /**
8902  * update_sd_pick_busiest - return 1 on busiest group
8903  * @env: The load balancing environment.
8904  * @sds: sched_domain statistics
8905  * @sg: sched_group candidate to be checked for being the busiest
8906  * @sgs: sched_group statistics
8907  *
8908  * Determine if @sg is a busier group than the previously selected
8909  * busiest group.
8910  *
8911  * Return: %true if @sg is a busier group than the previously selected
8912  * busiest group. %false otherwise.
8913  */
8914 static bool update_sd_pick_busiest(struct lb_env *env,
8915                    struct sd_lb_stats *sds,
8916                    struct sched_group *sg,
8917                    struct sg_lb_stats *sgs)
8918 {
8919     struct sg_lb_stats *busiest = &sds->busiest_stat;
8920 
8921     /* Make sure that there is at least one task to pull */
8922     if (!sgs->sum_h_nr_running)
8923         return false;
8924 
8925     /*
8926      * Don't try to pull misfit tasks we can't help.
8927      * We can use max_capacity here as reduction in capacity on some
8928      * CPUs in the group should either be possible to resolve
8929      * internally or be covered by avg_load imbalance (eventually).
8930      */
8931     if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
8932         (sgs->group_type == group_misfit_task) &&
8933         (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
8934          sds->local_stat.group_type != group_has_spare))
8935         return false;
8936 
8937     if (sgs->group_type > busiest->group_type)
8938         return true;
8939 
8940     if (sgs->group_type < busiest->group_type)
8941         return false;
8942 
8943     /*
8944      * The candidate and the current busiest group are the same type of
8945      * group. Let check which one is the busiest according to the type.
8946      */
8947 
8948     switch (sgs->group_type) {
8949     case group_overloaded:
8950         /* Select the overloaded group with highest avg_load. */
8951         if (sgs->avg_load <= busiest->avg_load)
8952             return false;
8953         break;
8954 
8955     case group_imbalanced:
8956         /*
8957          * Select the 1st imbalanced group as we don't have any way to
8958          * choose one more than another.
8959          */
8960         return false;
8961 
8962     case group_asym_packing:
8963         /* Prefer to move from lowest priority CPU's work */
8964         if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
8965             return false;
8966         break;
8967 
8968     case group_misfit_task:
8969         /*
8970          * If we have more than one misfit sg go with the biggest
8971          * misfit.
8972          */
8973         if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
8974             return false;
8975         break;
8976 
8977     case group_fully_busy:
8978         /*
8979          * Select the fully busy group with highest avg_load. In
8980          * theory, there is no need to pull task from such kind of
8981          * group because tasks have all compute capacity that they need
8982          * but we can still improve the overall throughput by reducing
8983          * contention when accessing shared HW resources.
8984          *
8985          * XXX for now avg_load is not computed and always 0 so we
8986          * select the 1st one.
8987          */
8988         if (sgs->avg_load <= busiest->avg_load)
8989             return false;
8990         break;
8991 
8992     case group_has_spare:
8993         /*
8994          * Select not overloaded group with lowest number of idle cpus
8995          * and highest number of running tasks. We could also compare
8996          * the spare capacity which is more stable but it can end up
8997          * that the group has less spare capacity but finally more idle
8998          * CPUs which means less opportunity to pull tasks.
8999          */
9000         if (sgs->idle_cpus > busiest->idle_cpus)
9001             return false;
9002         else if ((sgs->idle_cpus == busiest->idle_cpus) &&
9003              (sgs->sum_nr_running <= busiest->sum_nr_running))
9004             return false;
9005 
9006         break;
9007     }
9008 
9009     /*
9010      * Candidate sg has no more than one task per CPU and has higher
9011      * per-CPU capacity. Migrating tasks to less capable CPUs may harm
9012      * throughput. Maximize throughput, power/energy consequences are not
9013      * considered.
9014      */
9015     if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
9016         (sgs->group_type <= group_fully_busy) &&
9017         (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
9018         return false;
9019 
9020     return true;
9021 }
9022 
9023 #ifdef CONFIG_NUMA_BALANCING
9024 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
9025 {
9026     if (sgs->sum_h_nr_running > sgs->nr_numa_running)
9027         return regular;
9028     if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
9029         return remote;
9030     return all;
9031 }
9032 
9033 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
9034 {
9035     if (rq->nr_running > rq->nr_numa_running)
9036         return regular;
9037     if (rq->nr_running > rq->nr_preferred_running)
9038         return remote;
9039     return all;
9040 }
9041 #else
9042 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
9043 {
9044     return all;
9045 }
9046 
9047 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
9048 {
9049     return regular;
9050 }
9051 #endif /* CONFIG_NUMA_BALANCING */
9052 
9053 
9054 struct sg_lb_stats;
9055 
9056 /*
9057  * task_running_on_cpu - return 1 if @p is running on @cpu.
9058  */
9059 
9060 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
9061 {
9062     /* Task has no contribution or is new */
9063     if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
9064         return 0;
9065 
9066     if (task_on_rq_queued(p))
9067         return 1;
9068 
9069     return 0;
9070 }
9071 
9072 /**
9073  * idle_cpu_without - would a given CPU be idle without p ?
9074  * @cpu: the processor on which idleness is tested.
9075  * @p: task which should be ignored.
9076  *
9077  * Return: 1 if the CPU would be idle. 0 otherwise.
9078  */
9079 static int idle_cpu_without(int cpu, struct task_struct *p)
9080 {
9081     struct rq *rq = cpu_rq(cpu);
9082 
9083     if (rq->curr != rq->idle && rq->curr != p)
9084         return 0;
9085 
9086     /*
9087      * rq->nr_running can't be used but an updated version without the
9088      * impact of p on cpu must be used instead. The updated nr_running
9089      * be computed and tested before calling idle_cpu_without().
9090      */
9091 
9092 #ifdef CONFIG_SMP
9093     if (rq->ttwu_pending)
9094         return 0;
9095 #endif
9096 
9097     return 1;
9098 }
9099 
9100 /*
9101  * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
9102  * @sd: The sched_domain level to look for idlest group.
9103  * @group: sched_group whose statistics are to be updated.
9104  * @sgs: variable to hold the statistics for this group.
9105  * @p: The task for which we look for the idlest group/CPU.
9106  */
9107 static inline void update_sg_wakeup_stats(struct sched_domain *sd,
9108                       struct sched_group *group,
9109                       struct sg_lb_stats *sgs,
9110                       struct task_struct *p)
9111 {
9112     int i, nr_running;
9113 
9114     memset(sgs, 0, sizeof(*sgs));
9115 
9116     for_each_cpu(i, sched_group_span(group)) {
9117         struct rq *rq = cpu_rq(i);
9118         unsigned int local;
9119 
9120         sgs->group_load += cpu_load_without(rq, p);
9121         sgs->group_util += cpu_util_without(i, p);
9122         sgs->group_runnable += cpu_runnable_without(rq, p);
9123         local = task_running_on_cpu(i, p);
9124         sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
9125 
9126         nr_running = rq->nr_running - local;
9127         sgs->sum_nr_running += nr_running;
9128 
9129         /*
9130          * No need to call idle_cpu_without() if nr_running is not 0
9131          */
9132         if (!nr_running && idle_cpu_without(i, p))
9133             sgs->idle_cpus++;
9134 
9135     }
9136 
9137     /* Check if task fits in the group */
9138     if (sd->flags & SD_ASYM_CPUCAPACITY &&
9139         !task_fits_capacity(p, group->sgc->max_capacity)) {
9140         sgs->group_misfit_task_load = 1;
9141     }
9142 
9143     sgs->group_capacity = group->sgc->capacity;
9144 
9145     sgs->group_weight = group->group_weight;
9146 
9147     sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
9148 
9149     /*
9150      * Computing avg_load makes sense only when group is fully busy or
9151      * overloaded
9152      */
9153     if (sgs->group_type == group_fully_busy ||
9154         sgs->group_type == group_overloaded)
9155         sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
9156                 sgs->group_capacity;
9157 }
9158 
9159 static bool update_pick_idlest(struct sched_group *idlest,
9160                    struct sg_lb_stats *idlest_sgs,
9161                    struct sched_group *group,
9162                    struct sg_lb_stats *sgs)
9163 {
9164     if (sgs->group_type < idlest_sgs->group_type)
9165         return true;
9166 
9167     if (sgs->group_type > idlest_sgs->group_type)
9168         return false;
9169 
9170     /*
9171      * The candidate and the current idlest group are the same type of
9172      * group. Let check which one is the idlest according to the type.
9173      */
9174 
9175     switch (sgs->group_type) {
9176     case group_overloaded:
9177     case group_fully_busy:
9178         /* Select the group with lowest avg_load. */
9179         if (idlest_sgs->avg_load <= sgs->avg_load)
9180             return false;
9181         break;
9182 
9183     case group_imbalanced:
9184     case group_asym_packing:
9185         /* Those types are not used in the slow wakeup path */
9186         return false;
9187 
9188     case group_misfit_task:
9189         /* Select group with the highest max capacity */
9190         if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
9191             return false;
9192         break;
9193 
9194     case group_has_spare:
9195         /* Select group with most idle CPUs */
9196         if (idlest_sgs->idle_cpus > sgs->idle_cpus)
9197             return false;
9198 
9199         /* Select group with lowest group_util */
9200         if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
9201             idlest_sgs->group_util <= sgs->group_util)
9202             return false;
9203 
9204         break;
9205     }
9206 
9207     return true;
9208 }
9209 
9210 /*
9211  * find_idlest_group() finds and returns the least busy CPU group within the
9212  * domain.
9213  *
9214  * Assumes p is allowed on at least one CPU in sd.
9215  */
9216 static struct sched_group *
9217 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
9218 {
9219     struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
9220     struct sg_lb_stats local_sgs, tmp_sgs;
9221     struct sg_lb_stats *sgs;
9222     unsigned long imbalance;
9223     struct sg_lb_stats idlest_sgs = {
9224             .avg_load = UINT_MAX,
9225             .group_type = group_overloaded,
9226     };
9227 
9228     do {
9229         int local_group;
9230 
9231         /* Skip over this group if it has no CPUs allowed */
9232         if (!cpumask_intersects(sched_group_span(group),
9233                     p->cpus_ptr))
9234             continue;
9235 
9236         /* Skip over this group if no cookie matched */
9237         if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
9238             continue;
9239 
9240         local_group = cpumask_test_cpu(this_cpu,
9241                            sched_group_span(group));
9242 
9243         if (local_group) {
9244             sgs = &local_sgs;
9245             local = group;
9246         } else {
9247             sgs = &tmp_sgs;
9248         }
9249 
9250         update_sg_wakeup_stats(sd, group, sgs, p);
9251 
9252         if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
9253             idlest = group;
9254             idlest_sgs = *sgs;
9255         }
9256 
9257     } while (group = group->next, group != sd->groups);
9258 
9259 
9260     /* There is no idlest group to push tasks to */
9261     if (!idlest)
9262         return NULL;
9263 
9264     /* The local group has been skipped because of CPU affinity */
9265     if (!local)
9266         return idlest;
9267 
9268     /*
9269      * If the local group is idler than the selected idlest group
9270      * don't try and push the task.
9271      */
9272     if (local_sgs.group_type < idlest_sgs.group_type)
9273         return NULL;
9274 
9275     /*
9276      * If the local group is busier than the selected idlest group
9277      * try and push the task.
9278      */
9279     if (local_sgs.group_type > idlest_sgs.group_type)
9280         return idlest;
9281 
9282     switch (local_sgs.group_type) {
9283     case group_overloaded:
9284     case group_fully_busy:
9285 
9286         /* Calculate allowed imbalance based on load */
9287         imbalance = scale_load_down(NICE_0_LOAD) *
9288                 (sd->imbalance_pct-100) / 100;
9289 
9290         /*
9291          * When comparing groups across NUMA domains, it's possible for
9292          * the local domain to be very lightly loaded relative to the
9293          * remote domains but "imbalance" skews the comparison making
9294          * remote CPUs look much more favourable. When considering
9295          * cross-domain, add imbalance to the load on the remote node
9296          * and consider staying local.
9297          */
9298 
9299         if ((sd->flags & SD_NUMA) &&
9300             ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
9301             return NULL;
9302 
9303         /*
9304          * If the local group is less loaded than the selected
9305          * idlest group don't try and push any tasks.
9306          */
9307         if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
9308             return NULL;
9309 
9310         if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
9311             return NULL;
9312         break;
9313 
9314     case group_imbalanced:
9315     case group_asym_packing:
9316         /* Those type are not used in the slow wakeup path */
9317         return NULL;
9318 
9319     case group_misfit_task:
9320         /* Select group with the highest max capacity */
9321         if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
9322             return NULL;
9323         break;
9324 
9325     case group_has_spare:
9326 #ifdef CONFIG_NUMA
9327         if (sd->flags & SD_NUMA) {
9328             int imb_numa_nr = sd->imb_numa_nr;
9329 #ifdef CONFIG_NUMA_BALANCING
9330             int idlest_cpu;
9331             /*
9332              * If there is spare capacity at NUMA, try to select
9333              * the preferred node
9334              */
9335             if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
9336                 return NULL;
9337 
9338             idlest_cpu = cpumask_first(sched_group_span(idlest));
9339             if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
9340                 return idlest;
9341 #endif /* CONFIG_NUMA_BALANCING */
9342             /*
9343              * Otherwise, keep the task close to the wakeup source
9344              * and improve locality if the number of running tasks
9345              * would remain below threshold where an imbalance is
9346              * allowed while accounting for the possibility the
9347              * task is pinned to a subset of CPUs. If there is a
9348              * real need of migration, periodic load balance will
9349              * take care of it.
9350              */
9351             if (p->nr_cpus_allowed != NR_CPUS) {
9352                 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
9353 
9354                 cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
9355                 imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);
9356             }
9357 
9358             imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
9359             if (!adjust_numa_imbalance(imbalance,
9360                            local_sgs.sum_nr_running + 1,
9361                            imb_numa_nr)) {
9362                 return NULL;
9363             }
9364         }
9365 #endif /* CONFIG_NUMA */
9366 
9367         /*
9368          * Select group with highest number of idle CPUs. We could also
9369          * compare the utilization which is more stable but it can end
9370          * up that the group has less spare capacity but finally more
9371          * idle CPUs which means more opportunity to run task.
9372          */
9373         if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
9374             return NULL;
9375         break;
9376     }
9377 
9378     return idlest;
9379 }
9380 
9381 static void update_idle_cpu_scan(struct lb_env *env,
9382                  unsigned long sum_util)
9383 {
9384     struct sched_domain_shared *sd_share;
9385     int llc_weight, pct;
9386     u64 x, y, tmp;
9387     /*
9388      * Update the number of CPUs to scan in LLC domain, which could
9389      * be used as a hint in select_idle_cpu(). The update of sd_share
9390      * could be expensive because it is within a shared cache line.
9391      * So the write of this hint only occurs during periodic load
9392      * balancing, rather than CPU_NEWLY_IDLE, because the latter
9393      * can fire way more frequently than the former.
9394      */
9395     if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
9396         return;
9397 
9398     llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
9399     if (env->sd->span_weight != llc_weight)
9400         return;
9401 
9402     sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
9403     if (!sd_share)
9404         return;
9405 
9406     /*
9407      * The number of CPUs to search drops as sum_util increases, when
9408      * sum_util hits 85% or above, the scan stops.
9409      * The reason to choose 85% as the threshold is because this is the
9410      * imbalance_pct(117) when a LLC sched group is overloaded.
9411      *
9412      * let y = SCHED_CAPACITY_SCALE - p * x^2                       [1]
9413      * and y'= y / SCHED_CAPACITY_SCALE
9414      *
9415      * x is the ratio of sum_util compared to the CPU capacity:
9416      * x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE)
9417      * y' is the ratio of CPUs to be scanned in the LLC domain,
9418      * and the number of CPUs to scan is calculated by:
9419      *
9420      * nr_scan = llc_weight * y'                                    [2]
9421      *
9422      * When x hits the threshold of overloaded, AKA, when
9423      * x = 100 / pct, y drops to 0. According to [1],
9424      * p should be SCHED_CAPACITY_SCALE * pct^2 / 10000
9425      *
9426      * Scale x by SCHED_CAPACITY_SCALE:
9427      * x' = sum_util / llc_weight;                                  [3]
9428      *
9429      * and finally [1] becomes:
9430      * y = SCHED_CAPACITY_SCALE -
9431      *     x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE)            [4]
9432      *
9433      */
9434     /* equation [3] */
9435     x = sum_util;
9436     do_div(x, llc_weight);
9437 
9438     /* equation [4] */
9439     pct = env->sd->imbalance_pct;
9440     tmp = x * x * pct * pct;
9441     do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
9442     tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
9443     y = SCHED_CAPACITY_SCALE - tmp;
9444 
9445     /* equation [2] */
9446     y *= llc_weight;
9447     do_div(y, SCHED_CAPACITY_SCALE);
9448     if ((int)y != sd_share->nr_idle_scan)
9449         WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
9450 }
9451 
9452 /**
9453  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
9454  * @env: The load balancing environment.
9455  * @sds: variable to hold the statistics for this sched_domain.
9456  */
9457 
9458 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
9459 {
9460     struct sched_domain *child = env->sd->child;
9461     struct sched_group *sg = env->sd->groups;
9462     struct sg_lb_stats *local = &sds->local_stat;
9463     struct sg_lb_stats tmp_sgs;
9464     unsigned long sum_util = 0;
9465     int sg_status = 0;
9466 
9467     do {
9468         struct sg_lb_stats *sgs = &tmp_sgs;
9469         int local_group;
9470 
9471         local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
9472         if (local_group) {
9473             sds->local = sg;
9474             sgs = local;
9475 
9476             if (env->idle != CPU_NEWLY_IDLE ||
9477                 time_after_eq(jiffies, sg->sgc->next_update))
9478                 update_group_capacity(env->sd, env->dst_cpu);
9479         }
9480 
9481         update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
9482 
9483         if (local_group)
9484             goto next_group;
9485 
9486 
9487         if (update_sd_pick_busiest(env, sds, sg, sgs)) {
9488             sds->busiest = sg;
9489             sds->busiest_stat = *sgs;
9490         }
9491 
9492 next_group:
9493         /* Now, start updating sd_lb_stats */
9494         sds->total_load += sgs->group_load;
9495         sds->total_capacity += sgs->group_capacity;
9496 
9497         sum_util += sgs->group_util;
9498         sg = sg->next;
9499     } while (sg != env->sd->groups);
9500 
9501     /* Tag domain that child domain prefers tasks go to siblings first */
9502     sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
9503 
9504 
9505     if (env->sd->flags & SD_NUMA)
9506         env->fbq_type = fbq_classify_group(&sds->busiest_stat);
9507 
9508     if (!env->sd->parent) {
9509         struct root_domain *rd = env->dst_rq->rd;
9510 
9511         /* update overload indicator if we are at root domain */
9512         WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
9513 
9514         /* Update over-utilization (tipping point, U >= 0) indicator */
9515         WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
9516         trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
9517     } else if (sg_status & SG_OVERUTILIZED) {
9518         struct root_domain *rd = env->dst_rq->rd;
9519 
9520         WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
9521         trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
9522     }
9523 
9524     update_idle_cpu_scan(env, sum_util);
9525 }
9526 
9527 /**
9528  * calculate_imbalance - Calculate the amount of imbalance present within the
9529  *           groups of a given sched_domain during load balance.
9530  * @env: load balance environment
9531  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
9532  */
9533 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
9534 {
9535     struct sg_lb_stats *local, *busiest;
9536 
9537     local = &sds->local_stat;
9538     busiest = &sds->busiest_stat;
9539 
9540     if (busiest->group_type == group_misfit_task) {
9541         if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
9542             /* Set imbalance to allow misfit tasks to be balanced. */
9543             env->migration_type = migrate_misfit;
9544             env->imbalance = 1;
9545         } else {
9546             /*
9547              * Set load imbalance to allow moving task from cpu
9548              * with reduced capacity.
9549              */
9550             env->migration_type = migrate_load;
9551             env->imbalance = busiest->group_misfit_task_load;
9552         }
9553         return;
9554     }
9555 
9556     if (busiest->group_type == group_asym_packing) {
9557         /*
9558          * In case of asym capacity, we will try to migrate all load to
9559          * the preferred CPU.
9560          */
9561         env->migration_type = migrate_task;
9562         env->imbalance = busiest->sum_h_nr_running;
9563         return;
9564     }
9565 
9566     if (busiest->group_type == group_imbalanced) {
9567         /*
9568          * In the group_imb case we cannot rely on group-wide averages
9569          * to ensure CPU-load equilibrium, try to move any task to fix
9570          * the imbalance. The next load balance will take care of
9571          * balancing back the system.
9572          */
9573         env->migration_type = migrate_task;
9574         env->imbalance = 1;
9575         return;
9576     }
9577 
9578     /*
9579      * Try to use spare capacity of local group without overloading it or
9580      * emptying busiest.
9581      */
9582     if (local->group_type == group_has_spare) {
9583         if ((busiest->group_type > group_fully_busy) &&
9584             !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
9585             /*
9586              * If busiest is overloaded, try to fill spare
9587              * capacity. This might end up creating spare capacity
9588              * in busiest or busiest still being overloaded but
9589              * there is no simple way to directly compute the
9590              * amount of load to migrate in order to balance the
9591              * system.
9592              */
9593             env->migration_type = migrate_util;
9594             env->imbalance = max(local->group_capacity, local->group_util) -
9595                      local->group_util;
9596 
9597             /*
9598              * In some cases, the group's utilization is max or even
9599              * higher than capacity because of migrations but the
9600              * local CPU is (newly) idle. There is at least one
9601              * waiting task in this overloaded busiest group. Let's
9602              * try to pull it.
9603              */
9604             if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
9605                 env->migration_type = migrate_task;
9606                 env->imbalance = 1;
9607             }
9608 
9609             return;
9610         }
9611 
9612         if (busiest->group_weight == 1 || sds->prefer_sibling) {
9613             unsigned int nr_diff = busiest->sum_nr_running;
9614             /*
9615              * When prefer sibling, evenly spread running tasks on
9616              * groups.
9617              */
9618             env->migration_type = migrate_task;
9619             lsub_positive(&nr_diff, local->sum_nr_running);
9620             env->imbalance = nr_diff;
9621         } else {
9622 
9623             /*
9624              * If there is no overload, we just want to even the number of
9625              * idle cpus.
9626              */
9627             env->migration_type = migrate_task;
9628             env->imbalance = max_t(long, 0,
9629                            (local->idle_cpus - busiest->idle_cpus));
9630         }
9631 
9632 #ifdef CONFIG_NUMA
9633         /* Consider allowing a small imbalance between NUMA groups */
9634         if (env->sd->flags & SD_NUMA) {
9635             env->imbalance = adjust_numa_imbalance(env->imbalance,
9636                                    local->sum_nr_running + 1,
9637                                    env->sd->imb_numa_nr);
9638         }
9639 #endif
9640 
9641         /* Number of tasks to move to restore balance */
9642         env->imbalance >>= 1;
9643 
9644         return;
9645     }
9646 
9647     /*
9648      * Local is fully busy but has to take more load to relieve the
9649      * busiest group
9650      */
9651     if (local->group_type < group_overloaded) {
9652         /*
9653          * Local will become overloaded so the avg_load metrics are
9654          * finally needed.
9655          */
9656 
9657         local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
9658                   local->group_capacity;
9659 
9660         /*
9661          * If the local group is more loaded than the selected
9662          * busiest group don't try to pull any tasks.
9663          */
9664         if (local->avg_load >= busiest->avg_load) {
9665             env->imbalance = 0;
9666             return;
9667         }
9668 
9669         sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
9670                 sds->total_capacity;
9671     }
9672 
9673     /*
9674      * Both group are or will become overloaded and we're trying to get all
9675      * the CPUs to the average_load, so we don't want to push ourselves
9676      * above the average load, nor do we wish to reduce the max loaded CPU
9677      * below the average load. At the same time, we also don't want to
9678      * reduce the group load below the group capacity. Thus we look for
9679      * the minimum possible imbalance.
9680      */
9681     env->migration_type = migrate_load;
9682     env->imbalance = min(
9683         (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
9684         (sds->avg_load - local->avg_load) * local->group_capacity
9685     ) / SCHED_CAPACITY_SCALE;
9686 }
9687 
9688 /******* find_busiest_group() helpers end here *********************/
9689 
9690 /*
9691  * Decision matrix according to the local and busiest group type:
9692  *
9693  * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
9694  * has_spare        nr_idle   balanced   N/A    N/A  balanced   balanced
9695  * fully_busy       nr_idle   nr_idle    N/A    N/A  balanced   balanced
9696  * misfit_task      force     N/A        N/A    N/A  N/A        N/A
9697  * asym_packing     force     force      N/A    N/A  force      force
9698  * imbalanced       force     force      N/A    N/A  force      force
9699  * overloaded       force     force      N/A    N/A  force      avg_load
9700  *
9701  * N/A :      Not Applicable because already filtered while updating
9702  *            statistics.
9703  * balanced : The system is balanced for these 2 groups.
9704  * force :    Calculate the imbalance as load migration is probably needed.
9705  * avg_load : Only if imbalance is significant enough.
9706  * nr_idle :  dst_cpu is not busy and the number of idle CPUs is quite
9707  *            different in groups.
9708  */
9709 
9710 /**
9711  * find_busiest_group - Returns the busiest group within the sched_domain
9712  * if there is an imbalance.
9713  * @env: The load balancing environment.
9714  *
9715  * Also calculates the amount of runnable load which should be moved
9716  * to restore balance.
9717  *
9718  * Return:  - The busiest group if imbalance exists.
9719  */
9720 static struct sched_group *find_busiest_group(struct lb_env *env)
9721 {
9722     struct sg_lb_stats *local, *busiest;
9723     struct sd_lb_stats sds;
9724 
9725     init_sd_lb_stats(&sds);
9726 
9727     /*
9728      * Compute the various statistics relevant for load balancing at
9729      * this level.
9730      */
9731     update_sd_lb_stats(env, &sds);
9732 
9733     if (sched_energy_enabled()) {
9734         struct root_domain *rd = env->dst_rq->rd;
9735 
9736         if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
9737             goto out_balanced;
9738     }
9739 
9740     local = &sds.local_stat;
9741     busiest = &sds.busiest_stat;
9742 
9743     /* There is no busy sibling group to pull tasks from */
9744     if (!sds.busiest)
9745         goto out_balanced;
9746 
9747     /* Misfit tasks should be dealt with regardless of the avg load */
9748     if (busiest->group_type == group_misfit_task)
9749         goto force_balance;
9750 
9751     /* ASYM feature bypasses nice load balance check */
9752     if (busiest->group_type == group_asym_packing)
9753         goto force_balance;
9754 
9755     /*
9756      * If the busiest group is imbalanced the below checks don't
9757      * work because they assume all things are equal, which typically
9758      * isn't true due to cpus_ptr constraints and the like.
9759      */
9760     if (busiest->group_type == group_imbalanced)
9761         goto force_balance;
9762 
9763     /*
9764      * If the local group is busier than the selected busiest group
9765      * don't try and pull any tasks.
9766      */
9767     if (local->group_type > busiest->group_type)
9768         goto out_balanced;
9769 
9770     /*
9771      * When groups are overloaded, use the avg_load to ensure fairness
9772      * between tasks.
9773      */
9774     if (local->group_type == group_overloaded) {
9775         /*
9776          * If the local group is more loaded than the selected
9777          * busiest group don't try to pull any tasks.
9778          */
9779         if (local->avg_load >= busiest->avg_load)
9780             goto out_balanced;
9781 
9782         /* XXX broken for overlapping NUMA groups */
9783         sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
9784                 sds.total_capacity;
9785 
9786         /*
9787          * Don't pull any tasks if this group is already above the
9788          * domain average load.
9789          */
9790         if (local->avg_load >= sds.avg_load)
9791             goto out_balanced;
9792 
9793         /*
9794          * If the busiest group is more loaded, use imbalance_pct to be
9795          * conservative.
9796          */
9797         if (100 * busiest->avg_load <=
9798                 env->sd->imbalance_pct * local->avg_load)
9799             goto out_balanced;
9800     }
9801 
9802     /* Try to move all excess tasks to child's sibling domain */
9803     if (sds.prefer_sibling && local->group_type == group_has_spare &&
9804         busiest->sum_nr_running > local->sum_nr_running + 1)
9805         goto force_balance;
9806 
9807     if (busiest->group_type != group_overloaded) {
9808         if (env->idle == CPU_NOT_IDLE)
9809             /*
9810              * If the busiest group is not overloaded (and as a
9811              * result the local one too) but this CPU is already
9812              * busy, let another idle CPU try to pull task.
9813              */
9814             goto out_balanced;
9815 
9816         if (busiest->group_weight > 1 &&
9817             local->idle_cpus <= (busiest->idle_cpus + 1))
9818             /*
9819              * If the busiest group is not overloaded
9820              * and there is no imbalance between this and busiest
9821              * group wrt idle CPUs, it is balanced. The imbalance
9822              * becomes significant if the diff is greater than 1
9823              * otherwise we might end up to just move the imbalance
9824              * on another group. Of course this applies only if
9825              * there is more than 1 CPU per group.
9826              */
9827             goto out_balanced;
9828 
9829         if (busiest->sum_h_nr_running == 1)
9830             /*
9831              * busiest doesn't have any tasks waiting to run
9832              */
9833             goto out_balanced;
9834     }
9835 
9836 force_balance:
9837     /* Looks like there is an imbalance. Compute it */
9838     calculate_imbalance(env, &sds);
9839     return env->imbalance ? sds.busiest : NULL;
9840 
9841 out_balanced:
9842     env->imbalance = 0;
9843     return NULL;
9844 }
9845 
9846 /*
9847  * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
9848  */
9849 static struct rq *find_busiest_queue(struct lb_env *env,
9850                      struct sched_group *group)
9851 {
9852     struct rq *busiest = NULL, *rq;
9853     unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
9854     unsigned int busiest_nr = 0;
9855     int i;
9856 
9857     for_each_cpu_and(i, sched_group_span(group), env->cpus) {
9858         unsigned long capacity, load, util;
9859         unsigned int nr_running;
9860         enum fbq_type rt;
9861 
9862         rq = cpu_rq(i);
9863         rt = fbq_classify_rq(rq);
9864 
9865         /*
9866          * We classify groups/runqueues into three groups:
9867          *  - regular: there are !numa tasks
9868          *  - remote:  there are numa tasks that run on the 'wrong' node
9869          *  - all:     there is no distinction
9870          *
9871          * In order to avoid migrating ideally placed numa tasks,
9872          * ignore those when there's better options.
9873          *
9874          * If we ignore the actual busiest queue to migrate another
9875          * task, the next balance pass can still reduce the busiest
9876          * queue by moving tasks around inside the node.
9877          *
9878          * If we cannot move enough load due to this classification
9879          * the next pass will adjust the group classification and
9880          * allow migration of more tasks.
9881          *
9882          * Both cases only affect the total convergence complexity.
9883          */
9884         if (rt > env->fbq_type)
9885             continue;
9886 
9887         nr_running = rq->cfs.h_nr_running;
9888         if (!nr_running)
9889             continue;
9890 
9891         capacity = capacity_of(i);
9892 
9893         /*
9894          * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
9895          * eventually lead to active_balancing high->low capacity.
9896          * Higher per-CPU capacity is considered better than balancing
9897          * average load.
9898          */
9899         if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
9900             !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
9901             nr_running == 1)
9902             continue;
9903 
9904         /* Make sure we only pull tasks from a CPU of lower priority */
9905         if ((env->sd->flags & SD_ASYM_PACKING) &&
9906             sched_asym_prefer(i, env->dst_cpu) &&
9907             nr_running == 1)
9908             continue;
9909 
9910         switch (env->migration_type) {
9911         case migrate_load:
9912             /*
9913              * When comparing with load imbalance, use cpu_load()
9914              * which is not scaled with the CPU capacity.
9915              */
9916             load = cpu_load(rq);
9917 
9918             if (nr_running == 1 && load > env->imbalance &&
9919                 !check_cpu_capacity(rq, env->sd))
9920                 break;
9921 
9922             /*
9923              * For the load comparisons with the other CPUs,
9924              * consider the cpu_load() scaled with the CPU
9925              * capacity, so that the load can be moved away
9926              * from the CPU that is potentially running at a
9927              * lower capacity.
9928              *
9929              * Thus we're looking for max(load_i / capacity_i),
9930              * crosswise multiplication to rid ourselves of the
9931              * division works out to:
9932              * load_i * capacity_j > load_j * capacity_i;
9933              * where j is our previous maximum.
9934              */
9935             if (load * busiest_capacity > busiest_load * capacity) {
9936                 busiest_load = load;
9937                 busiest_capacity = capacity;
9938                 busiest = rq;
9939             }
9940             break;
9941 
9942         case migrate_util:
9943             util = cpu_util_cfs(i);
9944 
9945             /*
9946              * Don't try to pull utilization from a CPU with one
9947              * running task. Whatever its utilization, we will fail
9948              * detach the task.
9949              */
9950             if (nr_running <= 1)
9951                 continue;
9952 
9953             if (busiest_util < util) {
9954                 busiest_util = util;
9955                 busiest = rq;
9956             }
9957             break;
9958 
9959         case migrate_task:
9960             if (busiest_nr < nr_running) {
9961                 busiest_nr = nr_running;
9962                 busiest = rq;
9963             }
9964             break;
9965 
9966         case migrate_misfit:
9967             /*
9968              * For ASYM_CPUCAPACITY domains with misfit tasks we
9969              * simply seek the "biggest" misfit task.
9970              */
9971             if (rq->misfit_task_load > busiest_load) {
9972                 busiest_load = rq->misfit_task_load;
9973                 busiest = rq;
9974             }
9975 
9976             break;
9977 
9978         }
9979     }
9980 
9981     return busiest;
9982 }
9983 
9984 /*
9985  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
9986  * so long as it is large enough.
9987  */
9988 #define MAX_PINNED_INTERVAL 512
9989 
9990 static inline bool
9991 asym_active_balance(struct lb_env *env)
9992 {
9993     /*
9994      * ASYM_PACKING needs to force migrate tasks from busy but
9995      * lower priority CPUs in order to pack all tasks in the
9996      * highest priority CPUs.
9997      */
9998     return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
9999            sched_asym_prefer(env->dst_cpu, env->src_cpu);
10000 }
10001 
10002 static inline bool
10003 imbalanced_active_balance(struct lb_env *env)
10004 {
10005     struct sched_domain *sd = env->sd;
10006 
10007     /*
10008      * The imbalanced case includes the case of pinned tasks preventing a fair
10009      * distribution of the load on the system but also the even distribution of the
10010      * threads on a system with spare capacity
10011      */
10012     if ((env->migration_type == migrate_task) &&
10013         (sd->nr_balance_failed > sd->cache_nice_tries+2))
10014         return 1;
10015 
10016     return 0;
10017 }
10018 
10019 static int need_active_balance(struct lb_env *env)
10020 {
10021     struct sched_domain *sd = env->sd;
10022 
10023     if (asym_active_balance(env))
10024         return 1;
10025 
10026     if (imbalanced_active_balance(env))
10027         return 1;
10028 
10029     /*
10030      * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
10031      * It's worth migrating the task if the src_cpu's capacity is reduced
10032      * because of other sched_class or IRQs if more capacity stays
10033      * available on dst_cpu.
10034      */
10035     if ((env->idle != CPU_NOT_IDLE) &&
10036         (env->src_rq->cfs.h_nr_running == 1)) {
10037         if ((check_cpu_capacity(env->src_rq, sd)) &&
10038             (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
10039             return 1;
10040     }
10041 
10042     if (env->migration_type == migrate_misfit)
10043         return 1;
10044 
10045     return 0;
10046 }
10047 
10048 static int active_load_balance_cpu_stop(void *data);
10049 
10050 static int should_we_balance(struct lb_env *env)
10051 {
10052     struct sched_group *sg = env->sd->groups;
10053     int cpu;
10054 
10055     /*
10056      * Ensure the balancing environment is consistent; can happen
10057      * when the softirq triggers 'during' hotplug.
10058      */
10059     if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
10060         return 0;
10061 
10062     /*
10063      * In the newly idle case, we will allow all the CPUs
10064      * to do the newly idle load balance.
10065      *
10066      * However, we bail out if we already have tasks or a wakeup pending,
10067      * to optimize wakeup latency.
10068      */
10069     if (env->idle == CPU_NEWLY_IDLE) {
10070         if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending)
10071             return 0;
10072         return 1;
10073     }
10074 
10075     /* Try to find first idle CPU */
10076     for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
10077         if (!idle_cpu(cpu))
10078             continue;
10079 
10080         /* Are we the first idle CPU? */
10081         return cpu == env->dst_cpu;
10082     }
10083 
10084     /* Are we the first CPU of this group ? */
10085     return group_balance_cpu(sg) == env->dst_cpu;
10086 }
10087 
10088 /*
10089  * Check this_cpu to ensure it is balanced within domain. Attempt to move
10090  * tasks if there is an imbalance.
10091  */
10092 static int load_balance(int this_cpu, struct rq *this_rq,
10093             struct sched_domain *sd, enum cpu_idle_type idle,
10094             int *continue_balancing)
10095 {
10096     int ld_moved, cur_ld_moved, active_balance = 0;
10097     struct sched_domain *sd_parent = sd->parent;
10098     struct sched_group *group;
10099     struct rq *busiest;
10100     struct rq_flags rf;
10101     struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
10102 
10103     struct lb_env env = {
10104         .sd     = sd,
10105         .dst_cpu    = this_cpu,
10106         .dst_rq     = this_rq,
10107         .dst_grpmask    = sched_group_span(sd->groups),
10108         .idle       = idle,
10109         .loop_break = sched_nr_migrate_break,
10110         .cpus       = cpus,
10111         .fbq_type   = all,
10112         .tasks      = LIST_HEAD_INIT(env.tasks),
10113     };
10114 
10115     cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
10116 
10117     schedstat_inc(sd->lb_count[idle]);
10118 
10119 redo:
10120     if (!should_we_balance(&env)) {
10121         *continue_balancing = 0;
10122         goto out_balanced;
10123     }
10124 
10125     group = find_busiest_group(&env);
10126     if (!group) {
10127         schedstat_inc(sd->lb_nobusyg[idle]);
10128         goto out_balanced;
10129     }
10130 
10131     busiest = find_busiest_queue(&env, group);
10132     if (!busiest) {
10133         schedstat_inc(sd->lb_nobusyq[idle]);
10134         goto out_balanced;
10135     }
10136 
10137     BUG_ON(busiest == env.dst_rq);
10138 
10139     schedstat_add(sd->lb_imbalance[idle], env.imbalance);
10140 
10141     env.src_cpu = busiest->cpu;
10142     env.src_rq = busiest;
10143 
10144     ld_moved = 0;
10145     /* Clear this flag as soon as we find a pullable task */
10146     env.flags |= LBF_ALL_PINNED;
10147     if (busiest->nr_running > 1) {
10148         /*
10149          * Attempt to move tasks. If find_busiest_group has found
10150          * an imbalance but busiest->nr_running <= 1, the group is
10151          * still unbalanced. ld_moved simply stays zero, so it is
10152          * correctly treated as an imbalance.
10153          */
10154         env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
10155 
10156 more_balance:
10157         rq_lock_irqsave(busiest, &rf);
10158         update_rq_clock(busiest);
10159 
10160         /*
10161          * cur_ld_moved - load moved in current iteration
10162          * ld_moved     - cumulative load moved across iterations
10163          */
10164         cur_ld_moved = detach_tasks(&env);
10165 
10166         /*
10167          * We've detached some tasks from busiest_rq. Every
10168          * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
10169          * unlock busiest->lock, and we are able to be sure
10170          * that nobody can manipulate the tasks in parallel.
10171          * See task_rq_lock() family for the details.
10172          */
10173 
10174         rq_unlock(busiest, &rf);
10175 
10176         if (cur_ld_moved) {
10177             attach_tasks(&env);
10178             ld_moved += cur_ld_moved;
10179         }
10180 
10181         local_irq_restore(rf.flags);
10182 
10183         if (env.flags & LBF_NEED_BREAK) {
10184             env.flags &= ~LBF_NEED_BREAK;
10185             goto more_balance;
10186         }
10187 
10188         /*
10189          * Revisit (affine) tasks on src_cpu that couldn't be moved to
10190          * us and move them to an alternate dst_cpu in our sched_group
10191          * where they can run. The upper limit on how many times we
10192          * iterate on same src_cpu is dependent on number of CPUs in our
10193          * sched_group.
10194          *
10195          * This changes load balance semantics a bit on who can move
10196          * load to a given_cpu. In addition to the given_cpu itself
10197          * (or a ilb_cpu acting on its behalf where given_cpu is
10198          * nohz-idle), we now have balance_cpu in a position to move
10199          * load to given_cpu. In rare situations, this may cause
10200          * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
10201          * _independently_ and at _same_ time to move some load to
10202          * given_cpu) causing excess load to be moved to given_cpu.
10203          * This however should not happen so much in practice and
10204          * moreover subsequent load balance cycles should correct the
10205          * excess load moved.
10206          */
10207         if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
10208 
10209             /* Prevent to re-select dst_cpu via env's CPUs */
10210             __cpumask_clear_cpu(env.dst_cpu, env.cpus);
10211 
10212             env.dst_rq   = cpu_rq(env.new_dst_cpu);
10213             env.dst_cpu  = env.new_dst_cpu;
10214             env.flags   &= ~LBF_DST_PINNED;
10215             env.loop     = 0;
10216             env.loop_break   = sched_nr_migrate_break;
10217 
10218             /*
10219              * Go back to "more_balance" rather than "redo" since we
10220              * need to continue with same src_cpu.
10221              */
10222             goto more_balance;
10223         }
10224 
10225         /*
10226          * We failed to reach balance because of affinity.
10227          */
10228         if (sd_parent) {
10229             int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10230 
10231             if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
10232                 *group_imbalance = 1;
10233         }
10234 
10235         /* All tasks on this runqueue were pinned by CPU affinity */
10236         if (unlikely(env.flags & LBF_ALL_PINNED)) {
10237             __cpumask_clear_cpu(cpu_of(busiest), cpus);
10238             /*
10239              * Attempting to continue load balancing at the current
10240              * sched_domain level only makes sense if there are
10241              * active CPUs remaining as possible busiest CPUs to
10242              * pull load from which are not contained within the
10243              * destination group that is receiving any migrated
10244              * load.
10245              */
10246             if (!cpumask_subset(cpus, env.dst_grpmask)) {
10247                 env.loop = 0;
10248                 env.loop_break = sched_nr_migrate_break;
10249                 goto redo;
10250             }
10251             goto out_all_pinned;
10252         }
10253     }
10254 
10255     if (!ld_moved) {
10256         schedstat_inc(sd->lb_failed[idle]);
10257         /*
10258          * Increment the failure counter only on periodic balance.
10259          * We do not want newidle balance, which can be very
10260          * frequent, pollute the failure counter causing
10261          * excessive cache_hot migrations and active balances.
10262          */
10263         if (idle != CPU_NEWLY_IDLE)
10264             sd->nr_balance_failed++;
10265 
10266         if (need_active_balance(&env)) {
10267             unsigned long flags;
10268 
10269             raw_spin_rq_lock_irqsave(busiest, flags);
10270 
10271             /*
10272              * Don't kick the active_load_balance_cpu_stop,
10273              * if the curr task on busiest CPU can't be
10274              * moved to this_cpu:
10275              */
10276             if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
10277                 raw_spin_rq_unlock_irqrestore(busiest, flags);
10278                 goto out_one_pinned;
10279             }
10280 
10281             /* Record that we found at least one task that could run on this_cpu */
10282             env.flags &= ~LBF_ALL_PINNED;
10283 
10284             /*
10285              * ->active_balance synchronizes accesses to
10286              * ->active_balance_work.  Once set, it's cleared
10287              * only after active load balance is finished.
10288              */
10289             if (!busiest->active_balance) {
10290                 busiest->active_balance = 1;
10291                 busiest->push_cpu = this_cpu;
10292                 active_balance = 1;
10293             }
10294             raw_spin_rq_unlock_irqrestore(busiest, flags);
10295 
10296             if (active_balance) {
10297                 stop_one_cpu_nowait(cpu_of(busiest),
10298                     active_load_balance_cpu_stop, busiest,
10299                     &busiest->active_balance_work);
10300             }
10301         }
10302     } else {
10303         sd->nr_balance_failed = 0;
10304     }
10305 
10306     if (likely(!active_balance) || need_active_balance(&env)) {
10307         /* We were unbalanced, so reset the balancing interval */
10308         sd->balance_interval = sd->min_interval;
10309     }
10310 
10311     goto out;
10312 
10313 out_balanced:
10314     /*
10315      * We reach balance although we may have faced some affinity
10316      * constraints. Clear the imbalance flag only if other tasks got
10317      * a chance to move and fix the imbalance.
10318      */
10319     if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
10320         int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10321 
10322         if (*group_imbalance)
10323             *group_imbalance = 0;
10324     }
10325 
10326 out_all_pinned:
10327     /*
10328      * We reach balance because all tasks are pinned at this level so
10329      * we can't migrate them. Let the imbalance flag set so parent level
10330      * can try to migrate them.
10331      */
10332     schedstat_inc(sd->lb_balanced[idle]);
10333 
10334     sd->nr_balance_failed = 0;
10335 
10336 out_one_pinned:
10337     ld_moved = 0;
10338 
10339     /*
10340      * newidle_balance() disregards balance intervals, so we could
10341      * repeatedly reach this code, which would lead to balance_interval
10342      * skyrocketing in a short amount of time. Skip the balance_interval
10343      * increase logic to avoid that.
10344      */
10345     if (env.idle == CPU_NEWLY_IDLE)
10346         goto out;
10347 
10348     /* tune up the balancing interval */
10349     if ((env.flags & LBF_ALL_PINNED &&
10350          sd->balance_interval < MAX_PINNED_INTERVAL) ||
10351         sd->balance_interval < sd->max_interval)
10352         sd->balance_interval *= 2;
10353 out:
10354     return ld_moved;
10355 }
10356 
10357 static inline unsigned long
10358 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
10359 {
10360     unsigned long interval = sd->balance_interval;
10361 
10362     if (cpu_busy)
10363         interval *= sd->busy_factor;
10364 
10365     /* scale ms to jiffies */
10366     interval = msecs_to_jiffies(interval);
10367 
10368     /*
10369      * Reduce likelihood of busy balancing at higher domains racing with
10370      * balancing at lower domains by preventing their balancing periods
10371      * from being multiples of each other.
10372      */
10373     if (cpu_busy)
10374         interval -= 1;
10375 
10376     interval = clamp(interval, 1UL, max_load_balance_interval);
10377 
10378     return interval;
10379 }
10380 
10381 static inline void
10382 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
10383 {
10384     unsigned long interval, next;
10385 
10386     /* used by idle balance, so cpu_busy = 0 */
10387     interval = get_sd_balance_interval(sd, 0);
10388     next = sd->last_balance + interval;
10389 
10390     if (time_after(*next_balance, next))
10391         *next_balance = next;
10392 }
10393 
10394 /*
10395  * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
10396  * running tasks off the busiest CPU onto idle CPUs. It requires at
10397  * least 1 task to be running on each physical CPU where possible, and
10398  * avoids physical / logical imbalances.
10399  */
10400 static int active_load_balance_cpu_stop(void *data)
10401 {
10402     struct rq *busiest_rq = data;
10403     int busiest_cpu = cpu_of(busiest_rq);
10404     int target_cpu = busiest_rq->push_cpu;
10405     struct rq *target_rq = cpu_rq(target_cpu);
10406     struct sched_domain *sd;
10407     struct task_struct *p = NULL;
10408     struct rq_flags rf;
10409 
10410     rq_lock_irq(busiest_rq, &rf);
10411     /*
10412      * Between queueing the stop-work and running it is a hole in which
10413      * CPUs can become inactive. We should not move tasks from or to
10414      * inactive CPUs.
10415      */
10416     if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
10417         goto out_unlock;
10418 
10419     /* Make sure the requested CPU hasn't gone down in the meantime: */
10420     if (unlikely(busiest_cpu != smp_processor_id() ||
10421              !busiest_rq->active_balance))
10422         goto out_unlock;
10423 
10424     /* Is there any task to move? */
10425     if (busiest_rq->nr_running <= 1)
10426         goto out_unlock;
10427 
10428     /*
10429      * This condition is "impossible", if it occurs
10430      * we need to fix it. Originally reported by
10431      * Bjorn Helgaas on a 128-CPU setup.
10432      */
10433     BUG_ON(busiest_rq == target_rq);
10434 
10435     /* Search for an sd spanning us and the target CPU. */
10436     rcu_read_lock();
10437     for_each_domain(target_cpu, sd) {
10438         if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
10439             break;
10440     }
10441 
10442     if (likely(sd)) {
10443         struct lb_env env = {
10444             .sd     = sd,
10445             .dst_cpu    = target_cpu,
10446             .dst_rq     = target_rq,
10447             .src_cpu    = busiest_rq->cpu,
10448             .src_rq     = busiest_rq,
10449             .idle       = CPU_IDLE,
10450             .flags      = LBF_ACTIVE_LB,
10451         };
10452 
10453         schedstat_inc(sd->alb_count);
10454         update_rq_clock(busiest_rq);
10455 
10456         p = detach_one_task(&env);
10457         if (p) {
10458             schedstat_inc(sd->alb_pushed);
10459             /* Active balancing done, reset the failure counter. */
10460             sd->nr_balance_failed = 0;
10461         } else {
10462             schedstat_inc(sd->alb_failed);
10463         }
10464     }
10465     rcu_read_unlock();
10466 out_unlock:
10467     busiest_rq->active_balance = 0;
10468     rq_unlock(busiest_rq, &rf);
10469 
10470     if (p)
10471         attach_one_task(target_rq, p);
10472 
10473     local_irq_enable();
10474 
10475     return 0;
10476 }
10477 
10478 static DEFINE_SPINLOCK(balancing);
10479 
10480 /*
10481  * Scale the max load_balance interval with the number of CPUs in the system.
10482  * This trades load-balance latency on larger machines for less cross talk.
10483  */
10484 void update_max_interval(void)
10485 {
10486     max_load_balance_interval = HZ*num_online_cpus()/10;
10487 }
10488 
10489 static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
10490 {
10491     if (cost > sd->max_newidle_lb_cost) {
10492         /*
10493          * Track max cost of a domain to make sure to not delay the
10494          * next wakeup on the CPU.
10495          */
10496         sd->max_newidle_lb_cost = cost;
10497         sd->last_decay_max_lb_cost = jiffies;
10498     } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
10499         /*
10500          * Decay the newidle max times by ~1% per second to ensure that
10501          * it is not outdated and the current max cost is actually
10502          * shorter.
10503          */
10504         sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256;
10505         sd->last_decay_max_lb_cost = jiffies;
10506 
10507         return true;
10508     }
10509 
10510     return false;
10511 }
10512 
10513 /*
10514  * It checks each scheduling domain to see if it is due to be balanced,
10515  * and initiates a balancing operation if so.
10516  *
10517  * Balancing parameters are set up in init_sched_domains.
10518  */
10519 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
10520 {
10521     int continue_balancing = 1;
10522     int cpu = rq->cpu;
10523     int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10524     unsigned long interval;
10525     struct sched_domain *sd;
10526     /* Earliest time when we have to do rebalance again */
10527     unsigned long next_balance = jiffies + 60*HZ;
10528     int update_next_balance = 0;
10529     int need_serialize, need_decay = 0;
10530     u64 max_cost = 0;
10531 
10532     rcu_read_lock();
10533     for_each_domain(cpu, sd) {
10534         /*
10535          * Decay the newidle max times here because this is a regular
10536          * visit to all the domains.
10537          */
10538         need_decay = update_newidle_cost(sd, 0);
10539         max_cost += sd->max_newidle_lb_cost;
10540 
10541         /*
10542          * Stop the load balance at this level. There is another
10543          * CPU in our sched group which is doing load balancing more
10544          * actively.
10545          */
10546         if (!continue_balancing) {
10547             if (need_decay)
10548                 continue;
10549             break;
10550         }
10551 
10552         interval = get_sd_balance_interval(sd, busy);
10553 
10554         need_serialize = sd->flags & SD_SERIALIZE;
10555         if (need_serialize) {
10556             if (!spin_trylock(&balancing))
10557                 goto out;
10558         }
10559 
10560         if (time_after_eq(jiffies, sd->last_balance + interval)) {
10561             if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
10562                 /*
10563                  * The LBF_DST_PINNED logic could have changed
10564                  * env->dst_cpu, so we can't know our idle
10565                  * state even if we migrated tasks. Update it.
10566                  */
10567                 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
10568                 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10569             }
10570             sd->last_balance = jiffies;
10571             interval = get_sd_balance_interval(sd, busy);
10572         }
10573         if (need_serialize)
10574             spin_unlock(&balancing);
10575 out:
10576         if (time_after(next_balance, sd->last_balance + interval)) {
10577             next_balance = sd->last_balance + interval;
10578             update_next_balance = 1;
10579         }
10580     }
10581     if (need_decay) {
10582         /*
10583          * Ensure the rq-wide value also decays but keep it at a
10584          * reasonable floor to avoid funnies with rq->avg_idle.
10585          */
10586         rq->max_idle_balance_cost =
10587             max((u64)sysctl_sched_migration_cost, max_cost);
10588     }
10589     rcu_read_unlock();
10590 
10591     /*
10592      * next_balance will be updated only when there is a need.
10593      * When the cpu is attached to null domain for ex, it will not be
10594      * updated.
10595      */
10596     if (likely(update_next_balance))
10597         rq->next_balance = next_balance;
10598 
10599 }
10600 
10601 static inline int on_null_domain(struct rq *rq)
10602 {
10603     return unlikely(!rcu_dereference_sched(rq->sd));
10604 }
10605 
10606 #ifdef CONFIG_NO_HZ_COMMON
10607 /*
10608  * idle load balancing details
10609  * - When one of the busy CPUs notice that there may be an idle rebalancing
10610  *   needed, they will kick the idle load balancer, which then does idle
10611  *   load balancing for all the idle CPUs.
10612  * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED not set
10613  *   anywhere yet.
10614  */
10615 
10616 static inline int find_new_ilb(void)
10617 {
10618     int ilb;
10619     const struct cpumask *hk_mask;
10620 
10621     hk_mask = housekeeping_cpumask(HK_TYPE_MISC);
10622 
10623     for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
10624 
10625         if (ilb == smp_processor_id())
10626             continue;
10627 
10628         if (idle_cpu(ilb))
10629             return ilb;
10630     }
10631 
10632     return nr_cpu_ids;
10633 }
10634 
10635 /*
10636  * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
10637  * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one).
10638  */
10639 static void kick_ilb(unsigned int flags)
10640 {
10641     int ilb_cpu;
10642 
10643     /*
10644      * Increase nohz.next_balance only when if full ilb is triggered but
10645      * not if we only update stats.
10646      */
10647     if (flags & NOHZ_BALANCE_KICK)
10648         nohz.next_balance = jiffies+1;
10649 
10650     ilb_cpu = find_new_ilb();
10651 
10652     if (ilb_cpu >= nr_cpu_ids)
10653         return;
10654 
10655     /*
10656      * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
10657      * the first flag owns it; cleared by nohz_csd_func().
10658      */
10659     flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
10660     if (flags & NOHZ_KICK_MASK)
10661         return;
10662 
10663     /*
10664      * This way we generate an IPI on the target CPU which
10665      * is idle. And the softirq performing nohz idle load balance
10666      * will be run before returning from the IPI.
10667      */
10668     smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
10669 }
10670 
10671 /*
10672  * Current decision point for kicking the idle load balancer in the presence
10673  * of idle CPUs in the system.
10674  */
10675 static void nohz_balancer_kick(struct rq *rq)
10676 {
10677     unsigned long now = jiffies;
10678     struct sched_domain_shared *sds;
10679     struct sched_domain *sd;
10680     int nr_busy, i, cpu = rq->cpu;
10681     unsigned int flags = 0;
10682 
10683     if (unlikely(rq->idle_balance))
10684         return;
10685 
10686     /*
10687      * We may be recently in ticked or tickless idle mode. At the first
10688      * busy tick after returning from idle, we will update the busy stats.
10689      */
10690     nohz_balance_exit_idle(rq);
10691 
10692     /*
10693      * None are in tickless mode and hence no need for NOHZ idle load
10694      * balancing.
10695      */
10696     if (likely(!atomic_read(&nohz.nr_cpus)))
10697         return;
10698 
10699     if (READ_ONCE(nohz.has_blocked) &&
10700         time_after(now, READ_ONCE(nohz.next_blocked)))
10701         flags = NOHZ_STATS_KICK;
10702 
10703     if (time_before(now, nohz.next_balance))
10704         goto out;
10705 
10706     if (rq->nr_running >= 2) {
10707         flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10708         goto out;
10709     }
10710 
10711     rcu_read_lock();
10712 
10713     sd = rcu_dereference(rq->sd);
10714     if (sd) {
10715         /*
10716          * If there's a CFS task and the current CPU has reduced
10717          * capacity; kick the ILB to see if there's a better CPU to run
10718          * on.
10719          */
10720         if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
10721             flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10722             goto unlock;
10723         }
10724     }
10725 
10726     sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
10727     if (sd) {
10728         /*
10729          * When ASYM_PACKING; see if there's a more preferred CPU
10730          * currently idle; in which case, kick the ILB to move tasks
10731          * around.
10732          */
10733         for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
10734             if (sched_asym_prefer(i, cpu)) {
10735                 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10736                 goto unlock;
10737             }
10738         }
10739     }
10740 
10741     sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
10742     if (sd) {
10743         /*
10744          * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
10745          * to run the misfit task on.
10746          */
10747         if (check_misfit_status(rq, sd)) {
10748             flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10749             goto unlock;
10750         }
10751 
10752         /*
10753          * For asymmetric systems, we do not want to nicely balance
10754          * cache use, instead we want to embrace asymmetry and only
10755          * ensure tasks have enough CPU capacity.
10756          *
10757          * Skip the LLC logic because it's not relevant in that case.
10758          */
10759         goto unlock;
10760     }
10761 
10762     sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
10763     if (sds) {
10764         /*
10765          * If there is an imbalance between LLC domains (IOW we could
10766          * increase the overall cache use), we need some less-loaded LLC
10767          * domain to pull some load. Likewise, we may need to spread
10768          * load within the current LLC domain (e.g. packed SMT cores but
10769          * other CPUs are idle). We can't really know from here how busy
10770          * the others are - so just get a nohz balance going if it looks
10771          * like this LLC domain has tasks we could move.
10772          */
10773         nr_busy = atomic_read(&sds->nr_busy_cpus);
10774         if (nr_busy > 1) {
10775             flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10776             goto unlock;
10777         }
10778     }
10779 unlock:
10780     rcu_read_unlock();
10781 out:
10782     if (READ_ONCE(nohz.needs_update))
10783         flags |= NOHZ_NEXT_KICK;
10784 
10785     if (flags)
10786         kick_ilb(flags);
10787 }
10788 
10789 static void set_cpu_sd_state_busy(int cpu)
10790 {
10791     struct sched_domain *sd;
10792 
10793     rcu_read_lock();
10794     sd = rcu_dereference(per_cpu(sd_llc, cpu));
10795 
10796     if (!sd || !sd->nohz_idle)
10797         goto unlock;
10798     sd->nohz_idle = 0;
10799 
10800     atomic_inc(&sd->shared->nr_busy_cpus);
10801 unlock:
10802     rcu_read_unlock();
10803 }
10804 
10805 void nohz_balance_exit_idle(struct rq *rq)
10806 {
10807     SCHED_WARN_ON(rq != this_rq());
10808 
10809     if (likely(!rq->nohz_tick_stopped))
10810         return;
10811 
10812     rq->nohz_tick_stopped = 0;
10813     cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
10814     atomic_dec(&nohz.nr_cpus);
10815 
10816     set_cpu_sd_state_busy(rq->cpu);
10817 }
10818 
10819 static void set_cpu_sd_state_idle(int cpu)
10820 {
10821     struct sched_domain *sd;
10822 
10823     rcu_read_lock();
10824     sd = rcu_dereference(per_cpu(sd_llc, cpu));
10825 
10826     if (!sd || sd->nohz_idle)
10827         goto unlock;
10828     sd->nohz_idle = 1;
10829 
10830     atomic_dec(&sd->shared->nr_busy_cpus);
10831 unlock:
10832     rcu_read_unlock();
10833 }
10834 
10835 /*
10836  * This routine will record that the CPU is going idle with tick stopped.
10837  * This info will be used in performing idle load balancing in the future.
10838  */
10839 void nohz_balance_enter_idle(int cpu)
10840 {
10841     struct rq *rq = cpu_rq(cpu);
10842 
10843     SCHED_WARN_ON(cpu != smp_processor_id());
10844 
10845     /* If this CPU is going down, then nothing needs to be done: */
10846     if (!cpu_active(cpu))
10847         return;
10848 
10849     /* Spare idle load balancing on CPUs that don't want to be disturbed: */
10850     if (!housekeeping_cpu(cpu, HK_TYPE_SCHED))
10851         return;
10852 
10853     /*
10854      * Can be set safely without rq->lock held
10855      * If a clear happens, it will have evaluated last additions because
10856      * rq->lock is held during the check and the clear
10857      */
10858     rq->has_blocked_load = 1;
10859 
10860     /*
10861      * The tick is still stopped but load could have been added in the
10862      * meantime. We set the nohz.has_blocked flag to trig a check of the
10863      * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
10864      * of nohz.has_blocked can only happen after checking the new load
10865      */
10866     if (rq->nohz_tick_stopped)
10867         goto out;
10868 
10869     /* If we're a completely isolated CPU, we don't play: */
10870     if (on_null_domain(rq))
10871         return;
10872 
10873     rq->nohz_tick_stopped = 1;
10874 
10875     cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
10876     atomic_inc(&nohz.nr_cpus);
10877 
10878     /*
10879      * Ensures that if nohz_idle_balance() fails to observe our
10880      * @idle_cpus_mask store, it must observe the @has_blocked
10881      * and @needs_update stores.
10882      */
10883     smp_mb__after_atomic();
10884 
10885     set_cpu_sd_state_idle(cpu);
10886 
10887     WRITE_ONCE(nohz.needs_update, 1);
10888 out:
10889     /*
10890      * Each time a cpu enter idle, we assume that it has blocked load and
10891      * enable the periodic update of the load of idle cpus
10892      */
10893     WRITE_ONCE(nohz.has_blocked, 1);
10894 }
10895 
10896 static bool update_nohz_stats(struct rq *rq)
10897 {
10898     unsigned int cpu = rq->cpu;
10899 
10900     if (!rq->has_blocked_load)
10901         return false;
10902 
10903     if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
10904         return false;
10905 
10906     if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
10907         return true;
10908 
10909     update_blocked_averages(cpu);
10910 
10911     return rq->has_blocked_load;
10912 }
10913 
10914 /*
10915  * Internal function that runs load balance for all idle cpus. The load balance
10916  * can be a simple update of blocked load or a complete load balance with
10917  * tasks movement depending of flags.
10918  */
10919 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
10920                    enum cpu_idle_type idle)
10921 {
10922     /* Earliest time when we have to do rebalance again */
10923     unsigned long now = jiffies;
10924     unsigned long next_balance = now + 60*HZ;
10925     bool has_blocked_load = false;
10926     int update_next_balance = 0;
10927     int this_cpu = this_rq->cpu;
10928     int balance_cpu;
10929     struct rq *rq;
10930 
10931     SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
10932 
10933     /*
10934      * We assume there will be no idle load after this update and clear
10935      * the has_blocked flag. If a cpu enters idle in the mean time, it will
10936      * set the has_blocked flag and trigger another update of idle load.
10937      * Because a cpu that becomes idle, is added to idle_cpus_mask before
10938      * setting the flag, we are sure to not clear the state and not
10939      * check the load of an idle cpu.
10940      *
10941      * Same applies to idle_cpus_mask vs needs_update.
10942      */
10943     if (flags & NOHZ_STATS_KICK)
10944         WRITE_ONCE(nohz.has_blocked, 0);
10945     if (flags & NOHZ_NEXT_KICK)
10946         WRITE_ONCE(nohz.needs_update, 0);
10947 
10948     /*
10949      * Ensures that if we miss the CPU, we must see the has_blocked
10950      * store from nohz_balance_enter_idle().
10951      */
10952     smp_mb();
10953 
10954     /*
10955      * Start with the next CPU after this_cpu so we will end with this_cpu and let a
10956      * chance for other idle cpu to pull load.
10957      */
10958     for_each_cpu_wrap(balance_cpu,  nohz.idle_cpus_mask, this_cpu+1) {
10959         if (!idle_cpu(balance_cpu))
10960             continue;
10961 
10962         /*
10963          * If this CPU gets work to do, stop the load balancing
10964          * work being done for other CPUs. Next load
10965          * balancing owner will pick it up.
10966          */
10967         if (need_resched()) {
10968             if (flags & NOHZ_STATS_KICK)
10969                 has_blocked_load = true;
10970             if (flags & NOHZ_NEXT_KICK)
10971                 WRITE_ONCE(nohz.needs_update, 1);
10972             goto abort;
10973         }
10974 
10975         rq = cpu_rq(balance_cpu);
10976 
10977         if (flags & NOHZ_STATS_KICK)
10978             has_blocked_load |= update_nohz_stats(rq);
10979 
10980         /*
10981          * If time for next balance is due,
10982          * do the balance.
10983          */
10984         if (time_after_eq(jiffies, rq->next_balance)) {
10985             struct rq_flags rf;
10986 
10987             rq_lock_irqsave(rq, &rf);
10988             update_rq_clock(rq);
10989             rq_unlock_irqrestore(rq, &rf);
10990 
10991             if (flags & NOHZ_BALANCE_KICK)
10992                 rebalance_domains(rq, CPU_IDLE);
10993         }
10994 
10995         if (time_after(next_balance, rq->next_balance)) {
10996             next_balance = rq->next_balance;
10997             update_next_balance = 1;
10998         }
10999     }
11000 
11001     /*
11002      * next_balance will be updated only when there is a need.
11003      * When the CPU is attached to null domain for ex, it will not be
11004      * updated.
11005      */
11006     if (likely(update_next_balance))
11007         nohz.next_balance = next_balance;
11008 
11009     if (flags & NOHZ_STATS_KICK)
11010         WRITE_ONCE(nohz.next_blocked,
11011                now + msecs_to_jiffies(LOAD_AVG_PERIOD));
11012 
11013 abort:
11014     /* There is still blocked load, enable periodic update */
11015     if (has_blocked_load)
11016         WRITE_ONCE(nohz.has_blocked, 1);
11017 }
11018 
11019 /*
11020  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
11021  * rebalancing for all the cpus for whom scheduler ticks are stopped.
11022  */
11023 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11024 {
11025     unsigned int flags = this_rq->nohz_idle_balance;
11026 
11027     if (!flags)
11028         return false;
11029 
11030     this_rq->nohz_idle_balance = 0;
11031 
11032     if (idle != CPU_IDLE)
11033         return false;
11034 
11035     _nohz_idle_balance(this_rq, flags, idle);
11036 
11037     return true;
11038 }
11039 
11040 /*
11041  * Check if we need to run the ILB for updating blocked load before entering
11042  * idle state.
11043  */
11044 void nohz_run_idle_balance(int cpu)
11045 {
11046     unsigned int flags;
11047 
11048     flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
11049 
11050     /*
11051      * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
11052      * (ie NOHZ_STATS_KICK set) and will do the same.
11053      */
11054     if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
11055         _nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE);
11056 }
11057 
11058 static void nohz_newidle_balance(struct rq *this_rq)
11059 {
11060     int this_cpu = this_rq->cpu;
11061 
11062     /*
11063      * This CPU doesn't want to be disturbed by scheduler
11064      * housekeeping
11065      */
11066     if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED))
11067         return;
11068 
11069     /* Will wake up very soon. No time for doing anything else*/
11070     if (this_rq->avg_idle < sysctl_sched_migration_cost)
11071         return;
11072 
11073     /* Don't need to update blocked load of idle CPUs*/
11074     if (!READ_ONCE(nohz.has_blocked) ||
11075         time_before(jiffies, READ_ONCE(nohz.next_blocked)))
11076         return;
11077 
11078     /*
11079      * Set the need to trigger ILB in order to update blocked load
11080      * before entering idle state.
11081      */
11082     atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
11083 }
11084 
11085 #else /* !CONFIG_NO_HZ_COMMON */
11086 static inline void nohz_balancer_kick(struct rq *rq) { }
11087 
11088 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11089 {
11090     return false;
11091 }
11092 
11093 static inline void nohz_newidle_balance(struct rq *this_rq) { }
11094 #endif /* CONFIG_NO_HZ_COMMON */
11095 
11096 /*
11097  * newidle_balance is called by schedule() if this_cpu is about to become
11098  * idle. Attempts to pull tasks from other CPUs.
11099  *
11100  * Returns:
11101  *   < 0 - we released the lock and there are !fair tasks present
11102  *     0 - failed, no new tasks
11103  *   > 0 - success, new (fair) tasks present
11104  */
11105 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
11106 {
11107     unsigned long next_balance = jiffies + HZ;
11108     int this_cpu = this_rq->cpu;
11109     u64 t0, t1, curr_cost = 0;
11110     struct sched_domain *sd;
11111     int pulled_task = 0;
11112 
11113     update_misfit_status(NULL, this_rq);
11114 
11115     /*
11116      * There is a task waiting to run. No need to search for one.
11117      * Return 0; the task will be enqueued when switching to idle.
11118      */
11119     if (this_rq->ttwu_pending)
11120         return 0;
11121 
11122     /*
11123      * We must set idle_stamp _before_ calling idle_balance(), such that we
11124      * measure the duration of idle_balance() as idle time.
11125      */
11126     this_rq->idle_stamp = rq_clock(this_rq);
11127 
11128     /*
11129      * Do not pull tasks towards !active CPUs...
11130      */
11131     if (!cpu_active(this_cpu))
11132         return 0;
11133 
11134     /*
11135      * This is OK, because current is on_cpu, which avoids it being picked
11136      * for load-balance and preemption/IRQs are still disabled avoiding
11137      * further scheduler activity on it and we're being very careful to
11138      * re-start the picking loop.
11139      */
11140     rq_unpin_lock(this_rq, rf);
11141 
11142     rcu_read_lock();
11143     sd = rcu_dereference_check_sched_domain(this_rq->sd);
11144 
11145     if (!READ_ONCE(this_rq->rd->overload) ||
11146         (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) {
11147 
11148         if (sd)
11149             update_next_balance(sd, &next_balance);
11150         rcu_read_unlock();
11151 
11152         goto out;
11153     }
11154     rcu_read_unlock();
11155 
11156     raw_spin_rq_unlock(this_rq);
11157 
11158     t0 = sched_clock_cpu(this_cpu);
11159     update_blocked_averages(this_cpu);
11160 
11161     rcu_read_lock();
11162     for_each_domain(this_cpu, sd) {
11163         int continue_balancing = 1;
11164         u64 domain_cost;
11165 
11166         update_next_balance(sd, &next_balance);
11167 
11168         if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
11169             break;
11170 
11171         if (sd->flags & SD_BALANCE_NEWIDLE) {
11172 
11173             pulled_task = load_balance(this_cpu, this_rq,
11174                            sd, CPU_NEWLY_IDLE,
11175                            &continue_balancing);
11176 
11177             t1 = sched_clock_cpu(this_cpu);
11178             domain_cost = t1 - t0;
11179             update_newidle_cost(sd, domain_cost);
11180 
11181             curr_cost += domain_cost;
11182             t0 = t1;
11183         }
11184 
11185         /*
11186          * Stop searching for tasks to pull if there are
11187          * now runnable tasks on this rq.
11188          */
11189         if (pulled_task || this_rq->nr_running > 0 ||
11190             this_rq->ttwu_pending)
11191             break;
11192     }
11193     rcu_read_unlock();
11194 
11195     raw_spin_rq_lock(this_rq);
11196 
11197     if (curr_cost > this_rq->max_idle_balance_cost)
11198         this_rq->max_idle_balance_cost = curr_cost;
11199 
11200     /*
11201      * While browsing the domains, we released the rq lock, a task could
11202      * have been enqueued in the meantime. Since we're not going idle,
11203      * pretend we pulled a task.
11204      */
11205     if (this_rq->cfs.h_nr_running && !pulled_task)
11206         pulled_task = 1;
11207 
11208     /* Is there a task of a high priority class? */
11209     if (this_rq->nr_running != this_rq->cfs.h_nr_running)
11210         pulled_task = -1;
11211 
11212 out:
11213     /* Move the next balance forward */
11214     if (time_after(this_rq->next_balance, next_balance))
11215         this_rq->next_balance = next_balance;
11216 
11217     if (pulled_task)
11218         this_rq->idle_stamp = 0;
11219     else
11220         nohz_newidle_balance(this_rq);
11221 
11222     rq_repin_lock(this_rq, rf);
11223 
11224     return pulled_task;
11225 }
11226 
11227 /*
11228  * run_rebalance_domains is triggered when needed from the scheduler tick.
11229  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
11230  */
11231 static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
11232 {
11233     struct rq *this_rq = this_rq();
11234     enum cpu_idle_type idle = this_rq->idle_balance ?
11235                         CPU_IDLE : CPU_NOT_IDLE;
11236 
11237     /*
11238      * If this CPU has a pending nohz_balance_kick, then do the
11239      * balancing on behalf of the other idle CPUs whose ticks are
11240      * stopped. Do nohz_idle_balance *before* rebalance_domains to
11241      * give the idle CPUs a chance to load balance. Else we may
11242      * load balance only within the local sched_domain hierarchy
11243      * and abort nohz_idle_balance altogether if we pull some load.
11244      */
11245     if (nohz_idle_balance(this_rq, idle))
11246         return;
11247 
11248     /* normal load balance */
11249     update_blocked_averages(this_rq->cpu);
11250     rebalance_domains(this_rq, idle);
11251 }
11252 
11253 /*
11254  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
11255  */
11256 void trigger_load_balance(struct rq *rq)
11257 {
11258     /*
11259      * Don't need to rebalance while attached to NULL domain or
11260      * runqueue CPU is not active
11261      */
11262     if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
11263         return;
11264 
11265     if (time_after_eq(jiffies, rq->next_balance))
11266         raise_softirq(SCHED_SOFTIRQ);
11267 
11268     nohz_balancer_kick(rq);
11269 }
11270 
11271 static void rq_online_fair(struct rq *rq)
11272 {
11273     update_sysctl();
11274 
11275     update_runtime_enabled(rq);
11276 }
11277 
11278 static void rq_offline_fair(struct rq *rq)
11279 {
11280     update_sysctl();
11281 
11282     /* Ensure any throttled groups are reachable by pick_next_task */
11283     unthrottle_offline_cfs_rqs(rq);
11284 }
11285 
11286 #endif /* CONFIG_SMP */
11287 
11288 #ifdef CONFIG_SCHED_CORE
11289 static inline bool
11290 __entity_slice_used(struct sched_entity *se, int min_nr_tasks)
11291 {
11292     u64 slice = sched_slice(cfs_rq_of(se), se);
11293     u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
11294 
11295     return (rtime * min_nr_tasks > slice);
11296 }
11297 
11298 #define MIN_NR_TASKS_DURING_FORCEIDLE   2
11299 static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
11300 {
11301     if (!sched_core_enabled(rq))
11302         return;
11303 
11304     /*
11305      * If runqueue has only one task which used up its slice and
11306      * if the sibling is forced idle, then trigger schedule to
11307      * give forced idle task a chance.
11308      *
11309      * sched_slice() considers only this active rq and it gets the
11310      * whole slice. But during force idle, we have siblings acting
11311      * like a single runqueue and hence we need to consider runnable
11312      * tasks on this CPU and the forced idle CPU. Ideally, we should
11313      * go through the forced idle rq, but that would be a perf hit.
11314      * We can assume that the forced idle CPU has at least
11315      * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
11316      * if we need to give up the CPU.
11317      */
11318     if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
11319         __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
11320         resched_curr(rq);
11321 }
11322 
11323 /*
11324  * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
11325  */
11326 static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
11327 {
11328     for_each_sched_entity(se) {
11329         struct cfs_rq *cfs_rq = cfs_rq_of(se);
11330 
11331         if (forceidle) {
11332             if (cfs_rq->forceidle_seq == fi_seq)
11333                 break;
11334             cfs_rq->forceidle_seq = fi_seq;
11335         }
11336 
11337         cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
11338     }
11339 }
11340 
11341 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
11342 {
11343     struct sched_entity *se = &p->se;
11344 
11345     if (p->sched_class != &fair_sched_class)
11346         return;
11347 
11348     se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
11349 }
11350 
11351 bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
11352 {
11353     struct rq *rq = task_rq(a);
11354     struct sched_entity *sea = &a->se;
11355     struct sched_entity *seb = &b->se;
11356     struct cfs_rq *cfs_rqa;
11357     struct cfs_rq *cfs_rqb;
11358     s64 delta;
11359 
11360     SCHED_WARN_ON(task_rq(b)->core != rq->core);
11361 
11362 #ifdef CONFIG_FAIR_GROUP_SCHED
11363     /*
11364      * Find an se in the hierarchy for tasks a and b, such that the se's
11365      * are immediate siblings.
11366      */
11367     while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
11368         int sea_depth = sea->depth;
11369         int seb_depth = seb->depth;
11370 
11371         if (sea_depth >= seb_depth)
11372             sea = parent_entity(sea);
11373         if (sea_depth <= seb_depth)
11374             seb = parent_entity(seb);
11375     }
11376 
11377     se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
11378     se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
11379 
11380     cfs_rqa = sea->cfs_rq;
11381     cfs_rqb = seb->cfs_rq;
11382 #else
11383     cfs_rqa = &task_rq(a)->cfs;
11384     cfs_rqb = &task_rq(b)->cfs;
11385 #endif
11386 
11387     /*
11388      * Find delta after normalizing se's vruntime with its cfs_rq's
11389      * min_vruntime_fi, which would have been updated in prior calls
11390      * to se_fi_update().
11391      */
11392     delta = (s64)(sea->vruntime - seb->vruntime) +
11393         (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
11394 
11395     return delta > 0;
11396 }
11397 #else
11398 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
11399 #endif
11400 
11401 /*
11402  * scheduler tick hitting a task of our scheduling class.
11403  *
11404  * NOTE: This function can be called remotely by the tick offload that
11405  * goes along full dynticks. Therefore no local assumption can be made
11406  * and everything must be accessed through the @rq and @curr passed in
11407  * parameters.
11408  */
11409 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
11410 {
11411     struct cfs_rq *cfs_rq;
11412     struct sched_entity *se = &curr->se;
11413 
11414     for_each_sched_entity(se) {
11415         cfs_rq = cfs_rq_of(se);
11416         entity_tick(cfs_rq, se, queued);
11417     }
11418 
11419     if (static_branch_unlikely(&sched_numa_balancing))
11420         task_tick_numa(rq, curr);
11421 
11422     update_misfit_status(curr, rq);
11423     update_overutilized_status(task_rq(curr));
11424 
11425     task_tick_core(rq, curr);
11426 }
11427 
11428 /*
11429  * called on fork with the child task as argument from the parent's context
11430  *  - child not yet on the tasklist
11431  *  - preemption disabled
11432  */
11433 static void task_fork_fair(struct task_struct *p)
11434 {
11435     struct cfs_rq *cfs_rq;
11436     struct sched_entity *se = &p->se, *curr;
11437     struct rq *rq = this_rq();
11438     struct rq_flags rf;
11439 
11440     rq_lock(rq, &rf);
11441     update_rq_clock(rq);
11442 
11443     cfs_rq = task_cfs_rq(current);
11444     curr = cfs_rq->curr;
11445     if (curr) {
11446         update_curr(cfs_rq);
11447         se->vruntime = curr->vruntime;
11448     }
11449     place_entity(cfs_rq, se, 1);
11450 
11451     if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
11452         /*
11453          * Upon rescheduling, sched_class::put_prev_task() will place
11454          * 'current' within the tree based on its new key value.
11455          */
11456         swap(curr->vruntime, se->vruntime);
11457         resched_curr(rq);
11458     }
11459 
11460     se->vruntime -= cfs_rq->min_vruntime;
11461     rq_unlock(rq, &rf);
11462 }
11463 
11464 /*
11465  * Priority of the task has changed. Check to see if we preempt
11466  * the current task.
11467  */
11468 static void
11469 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
11470 {
11471     if (!task_on_rq_queued(p))
11472         return;
11473 
11474     if (rq->cfs.nr_running == 1)
11475         return;
11476 
11477     /*
11478      * Reschedule if we are currently running on this runqueue and
11479      * our priority decreased, or if we are not currently running on
11480      * this runqueue and our priority is higher than the current's
11481      */
11482     if (task_current(rq, p)) {
11483         if (p->prio > oldprio)
11484             resched_curr(rq);
11485     } else
11486         check_preempt_curr(rq, p, 0);
11487 }
11488 
11489 static inline bool vruntime_normalized(struct task_struct *p)
11490 {
11491     struct sched_entity *se = &p->se;
11492 
11493     /*
11494      * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
11495      * the dequeue_entity(.flags=0) will already have normalized the
11496      * vruntime.
11497      */
11498     if (p->on_rq)
11499         return true;
11500 
11501     /*
11502      * When !on_rq, vruntime of the task has usually NOT been normalized.
11503      * But there are some cases where it has already been normalized:
11504      *
11505      * - A forked child which is waiting for being woken up by
11506      *   wake_up_new_task().
11507      * - A task which has been woken up by try_to_wake_up() and
11508      *   waiting for actually being woken up by sched_ttwu_pending().
11509      */
11510     if (!se->sum_exec_runtime ||
11511         (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
11512         return true;
11513 
11514     return false;
11515 }
11516 
11517 #ifdef CONFIG_FAIR_GROUP_SCHED
11518 /*
11519  * Propagate the changes of the sched_entity across the tg tree to make it
11520  * visible to the root
11521  */
11522 static void propagate_entity_cfs_rq(struct sched_entity *se)
11523 {
11524     struct cfs_rq *cfs_rq = cfs_rq_of(se);
11525 
11526     if (cfs_rq_throttled(cfs_rq))
11527         return;
11528 
11529     if (!throttled_hierarchy(cfs_rq))
11530         list_add_leaf_cfs_rq(cfs_rq);
11531 
11532     /* Start to propagate at parent */
11533     se = se->parent;
11534 
11535     for_each_sched_entity(se) {
11536         cfs_rq = cfs_rq_of(se);
11537 
11538         update_load_avg(cfs_rq, se, UPDATE_TG);
11539 
11540         if (cfs_rq_throttled(cfs_rq))
11541             break;
11542 
11543         if (!throttled_hierarchy(cfs_rq))
11544             list_add_leaf_cfs_rq(cfs_rq);
11545     }
11546 }
11547 #else
11548 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
11549 #endif
11550 
11551 static void detach_entity_cfs_rq(struct sched_entity *se)
11552 {
11553     struct cfs_rq *cfs_rq = cfs_rq_of(se);
11554 
11555     /* Catch up with the cfs_rq and remove our load when we leave */
11556     update_load_avg(cfs_rq, se, 0);
11557     detach_entity_load_avg(cfs_rq, se);
11558     update_tg_load_avg(cfs_rq);
11559     propagate_entity_cfs_rq(se);
11560 }
11561 
11562 static void attach_entity_cfs_rq(struct sched_entity *se)
11563 {
11564     struct cfs_rq *cfs_rq = cfs_rq_of(se);
11565 
11566 #ifdef CONFIG_FAIR_GROUP_SCHED
11567     /*
11568      * Since the real-depth could have been changed (only FAIR
11569      * class maintain depth value), reset depth properly.
11570      */
11571     se->depth = se->parent ? se->parent->depth + 1 : 0;
11572 #endif
11573 
11574     /* Synchronize entity with its cfs_rq */
11575     update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
11576     attach_entity_load_avg(cfs_rq, se);
11577     update_tg_load_avg(cfs_rq);
11578     propagate_entity_cfs_rq(se);
11579 }
11580 
11581 static void detach_task_cfs_rq(struct task_struct *p)
11582 {
11583     struct sched_entity *se = &p->se;
11584     struct cfs_rq *cfs_rq = cfs_rq_of(se);
11585 
11586     if (!vruntime_normalized(p)) {
11587         /*
11588          * Fix up our vruntime so that the current sleep doesn't
11589          * cause 'unlimited' sleep bonus.
11590          */
11591         place_entity(cfs_rq, se, 0);
11592         se->vruntime -= cfs_rq->min_vruntime;
11593     }
11594 
11595     detach_entity_cfs_rq(se);
11596 }
11597 
11598 static void attach_task_cfs_rq(struct task_struct *p)
11599 {
11600     struct sched_entity *se = &p->se;
11601     struct cfs_rq *cfs_rq = cfs_rq_of(se);
11602 
11603     attach_entity_cfs_rq(se);
11604 
11605     if (!vruntime_normalized(p))
11606         se->vruntime += cfs_rq->min_vruntime;
11607 }
11608 
11609 static void switched_from_fair(struct rq *rq, struct task_struct *p)
11610 {
11611     detach_task_cfs_rq(p);
11612 }
11613 
11614 static void switched_to_fair(struct rq *rq, struct task_struct *p)
11615 {
11616     attach_task_cfs_rq(p);
11617 
11618     if (task_on_rq_queued(p)) {
11619         /*
11620          * We were most likely switched from sched_rt, so
11621          * kick off the schedule if running, otherwise just see
11622          * if we can still preempt the current task.
11623          */
11624         if (task_current(rq, p))
11625             resched_curr(rq);
11626         else
11627             check_preempt_curr(rq, p, 0);
11628     }
11629 }
11630 
11631 /* Account for a task changing its policy or group.
11632  *
11633  * This routine is mostly called to set cfs_rq->curr field when a task
11634  * migrates between groups/classes.
11635  */
11636 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
11637 {
11638     struct sched_entity *se = &p->se;
11639 
11640 #ifdef CONFIG_SMP
11641     if (task_on_rq_queued(p)) {
11642         /*
11643          * Move the next running task to the front of the list, so our
11644          * cfs_tasks list becomes MRU one.
11645          */
11646         list_move(&se->group_node, &rq->cfs_tasks);
11647     }
11648 #endif
11649 
11650     for_each_sched_entity(se) {
11651         struct cfs_rq *cfs_rq = cfs_rq_of(se);
11652 
11653         set_next_entity(cfs_rq, se);
11654         /* ensure bandwidth has been allocated on our new cfs_rq */
11655         account_cfs_rq_runtime(cfs_rq, 0);
11656     }
11657 }
11658 
11659 void init_cfs_rq(struct cfs_rq *cfs_rq)
11660 {
11661     cfs_rq->tasks_timeline = RB_ROOT_CACHED;
11662     u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
11663 #ifdef CONFIG_SMP
11664     raw_spin_lock_init(&cfs_rq->removed.lock);
11665 #endif
11666 }
11667 
11668 #ifdef CONFIG_FAIR_GROUP_SCHED
11669 static void task_set_group_fair(struct task_struct *p)
11670 {
11671     struct sched_entity *se = &p->se;
11672 
11673     set_task_rq(p, task_cpu(p));
11674     se->depth = se->parent ? se->parent->depth + 1 : 0;
11675 }
11676 
11677 static void task_move_group_fair(struct task_struct *p)
11678 {
11679     detach_task_cfs_rq(p);
11680     set_task_rq(p, task_cpu(p));
11681 
11682 #ifdef CONFIG_SMP
11683     /* Tell se's cfs_rq has been changed -- migrated */
11684     p->se.avg.last_update_time = 0;
11685 #endif
11686     attach_task_cfs_rq(p);
11687 }
11688 
11689 static void task_change_group_fair(struct task_struct *p, int type)
11690 {
11691     switch (type) {
11692     case TASK_SET_GROUP:
11693         task_set_group_fair(p);
11694         break;
11695 
11696     case TASK_MOVE_GROUP:
11697         task_move_group_fair(p);
11698         break;
11699     }
11700 }
11701 
11702 void free_fair_sched_group(struct task_group *tg)
11703 {
11704     int i;
11705 
11706     for_each_possible_cpu(i) {
11707         if (tg->cfs_rq)
11708             kfree(tg->cfs_rq[i]);
11709         if (tg->se)
11710             kfree(tg->se[i]);
11711     }
11712 
11713     kfree(tg->cfs_rq);
11714     kfree(tg->se);
11715 }
11716 
11717 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
11718 {
11719     struct sched_entity *se;
11720     struct cfs_rq *cfs_rq;
11721     int i;
11722 
11723     tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
11724     if (!tg->cfs_rq)
11725         goto err;
11726     tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
11727     if (!tg->se)
11728         goto err;
11729 
11730     tg->shares = NICE_0_LOAD;
11731 
11732     init_cfs_bandwidth(tg_cfs_bandwidth(tg));
11733 
11734     for_each_possible_cpu(i) {
11735         cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
11736                       GFP_KERNEL, cpu_to_node(i));
11737         if (!cfs_rq)
11738             goto err;
11739 
11740         se = kzalloc_node(sizeof(struct sched_entity_stats),
11741                   GFP_KERNEL, cpu_to_node(i));
11742         if (!se)
11743             goto err_free_rq;
11744 
11745         init_cfs_rq(cfs_rq);
11746         init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
11747         init_entity_runnable_average(se);
11748     }
11749 
11750     return 1;
11751 
11752 err_free_rq:
11753     kfree(cfs_rq);
11754 err:
11755     return 0;
11756 }
11757 
11758 void online_fair_sched_group(struct task_group *tg)
11759 {
11760     struct sched_entity *se;
11761     struct rq_flags rf;
11762     struct rq *rq;
11763     int i;
11764 
11765     for_each_possible_cpu(i) {
11766         rq = cpu_rq(i);
11767         se = tg->se[i];
11768         rq_lock_irq(rq, &rf);
11769         update_rq_clock(rq);
11770         attach_entity_cfs_rq(se);
11771         sync_throttle(tg, i);
11772         rq_unlock_irq(rq, &rf);
11773     }
11774 }
11775 
11776 void unregister_fair_sched_group(struct task_group *tg)
11777 {
11778     unsigned long flags;
11779     struct rq *rq;
11780     int cpu;
11781 
11782     destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
11783 
11784     for_each_possible_cpu(cpu) {
11785         if (tg->se[cpu])
11786             remove_entity_load_avg(tg->se[cpu]);
11787 
11788         /*
11789          * Only empty task groups can be destroyed; so we can speculatively
11790          * check on_list without danger of it being re-added.
11791          */
11792         if (!tg->cfs_rq[cpu]->on_list)
11793             continue;
11794 
11795         rq = cpu_rq(cpu);
11796 
11797         raw_spin_rq_lock_irqsave(rq, flags);
11798         list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
11799         raw_spin_rq_unlock_irqrestore(rq, flags);
11800     }
11801 }
11802 
11803 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
11804             struct sched_entity *se, int cpu,
11805             struct sched_entity *parent)
11806 {
11807     struct rq *rq = cpu_rq(cpu);
11808 
11809     cfs_rq->tg = tg;
11810     cfs_rq->rq = rq;
11811     init_cfs_rq_runtime(cfs_rq);
11812 
11813     tg->cfs_rq[cpu] = cfs_rq;
11814     tg->se[cpu] = se;
11815 
11816     /* se could be NULL for root_task_group */
11817     if (!se)
11818         return;
11819 
11820     if (!parent) {
11821         se->cfs_rq = &rq->cfs;
11822         se->depth = 0;
11823     } else {
11824         se->cfs_rq = parent->my_q;
11825         se->depth = parent->depth + 1;
11826     }
11827 
11828     se->my_q = cfs_rq;
11829     /* guarantee group entities always have weight */
11830     update_load_set(&se->load, NICE_0_LOAD);
11831     se->parent = parent;
11832 }
11833 
11834 static DEFINE_MUTEX(shares_mutex);
11835 
11836 static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
11837 {
11838     int i;
11839 
11840     lockdep_assert_held(&shares_mutex);
11841 
11842     /*
11843      * We can't change the weight of the root cgroup.
11844      */
11845     if (!tg->se[0])
11846         return -EINVAL;
11847 
11848     shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
11849 
11850     if (tg->shares == shares)
11851         return 0;
11852 
11853     tg->shares = shares;
11854     for_each_possible_cpu(i) {
11855         struct rq *rq = cpu_rq(i);
11856         struct sched_entity *se = tg->se[i];
11857         struct rq_flags rf;
11858 
11859         /* Propagate contribution to hierarchy */
11860         rq_lock_irqsave(rq, &rf);
11861         update_rq_clock(rq);
11862         for_each_sched_entity(se) {
11863             update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
11864             update_cfs_group(se);
11865         }
11866         rq_unlock_irqrestore(rq, &rf);
11867     }
11868 
11869     return 0;
11870 }
11871 
11872 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
11873 {
11874     int ret;
11875 
11876     mutex_lock(&shares_mutex);
11877     if (tg_is_idle(tg))
11878         ret = -EINVAL;
11879     else
11880         ret = __sched_group_set_shares(tg, shares);
11881     mutex_unlock(&shares_mutex);
11882 
11883     return ret;
11884 }
11885 
11886 int sched_group_set_idle(struct task_group *tg, long idle)
11887 {
11888     int i;
11889 
11890     if (tg == &root_task_group)
11891         return -EINVAL;
11892 
11893     if (idle < 0 || idle > 1)
11894         return -EINVAL;
11895 
11896     mutex_lock(&shares_mutex);
11897 
11898     if (tg->idle == idle) {
11899         mutex_unlock(&shares_mutex);
11900         return 0;
11901     }
11902 
11903     tg->idle = idle;
11904 
11905     for_each_possible_cpu(i) {
11906         struct rq *rq = cpu_rq(i);
11907         struct sched_entity *se = tg->se[i];
11908         struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
11909         bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
11910         long idle_task_delta;
11911         struct rq_flags rf;
11912 
11913         rq_lock_irqsave(rq, &rf);
11914 
11915         grp_cfs_rq->idle = idle;
11916         if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
11917             goto next_cpu;
11918 
11919         if (se->on_rq) {
11920             parent_cfs_rq = cfs_rq_of(se);
11921             if (cfs_rq_is_idle(grp_cfs_rq))
11922                 parent_cfs_rq->idle_nr_running++;
11923             else
11924                 parent_cfs_rq->idle_nr_running--;
11925         }
11926 
11927         idle_task_delta = grp_cfs_rq->h_nr_running -
11928                   grp_cfs_rq->idle_h_nr_running;
11929         if (!cfs_rq_is_idle(grp_cfs_rq))
11930             idle_task_delta *= -1;
11931 
11932         for_each_sched_entity(se) {
11933             struct cfs_rq *cfs_rq = cfs_rq_of(se);
11934 
11935             if (!se->on_rq)
11936                 break;
11937 
11938             cfs_rq->idle_h_nr_running += idle_task_delta;
11939 
11940             /* Already accounted at parent level and above. */
11941             if (cfs_rq_is_idle(cfs_rq))
11942                 break;
11943         }
11944 
11945 next_cpu:
11946         rq_unlock_irqrestore(rq, &rf);
11947     }
11948 
11949     /* Idle groups have minimum weight. */
11950     if (tg_is_idle(tg))
11951         __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
11952     else
11953         __sched_group_set_shares(tg, NICE_0_LOAD);
11954 
11955     mutex_unlock(&shares_mutex);
11956     return 0;
11957 }
11958 
11959 #else /* CONFIG_FAIR_GROUP_SCHED */
11960 
11961 void free_fair_sched_group(struct task_group *tg) { }
11962 
11963 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
11964 {
11965     return 1;
11966 }
11967 
11968 void online_fair_sched_group(struct task_group *tg) { }
11969 
11970 void unregister_fair_sched_group(struct task_group *tg) { }
11971 
11972 #endif /* CONFIG_FAIR_GROUP_SCHED */
11973 
11974 
11975 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
11976 {
11977     struct sched_entity *se = &task->se;
11978     unsigned int rr_interval = 0;
11979 
11980     /*
11981      * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
11982      * idle runqueue:
11983      */
11984     if (rq->cfs.load.weight)
11985         rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
11986 
11987     return rr_interval;
11988 }
11989 
11990 /*
11991  * All the scheduling class methods:
11992  */
11993 DEFINE_SCHED_CLASS(fair) = {
11994 
11995     .enqueue_task       = enqueue_task_fair,
11996     .dequeue_task       = dequeue_task_fair,
11997     .yield_task     = yield_task_fair,
11998     .yield_to_task      = yield_to_task_fair,
11999 
12000     .check_preempt_curr = check_preempt_wakeup,
12001 
12002     .pick_next_task     = __pick_next_task_fair,
12003     .put_prev_task      = put_prev_task_fair,
12004     .set_next_task          = set_next_task_fair,
12005 
12006 #ifdef CONFIG_SMP
12007     .balance        = balance_fair,
12008     .pick_task      = pick_task_fair,
12009     .select_task_rq     = select_task_rq_fair,
12010     .migrate_task_rq    = migrate_task_rq_fair,
12011 
12012     .rq_online      = rq_online_fair,
12013     .rq_offline     = rq_offline_fair,
12014 
12015     .task_dead      = task_dead_fair,
12016     .set_cpus_allowed   = set_cpus_allowed_common,
12017 #endif
12018 
12019     .task_tick      = task_tick_fair,
12020     .task_fork      = task_fork_fair,
12021 
12022     .prio_changed       = prio_changed_fair,
12023     .switched_from      = switched_from_fair,
12024     .switched_to        = switched_to_fair,
12025 
12026     .get_rr_interval    = get_rr_interval_fair,
12027 
12028     .update_curr        = update_curr_fair,
12029 
12030 #ifdef CONFIG_FAIR_GROUP_SCHED
12031     .task_change_group  = task_change_group_fair,
12032 #endif
12033 
12034 #ifdef CONFIG_UCLAMP_TASK
12035     .uclamp_enabled     = 1,
12036 #endif
12037 };
12038 
12039 #ifdef CONFIG_SCHED_DEBUG
12040 void print_cfs_stats(struct seq_file *m, int cpu)
12041 {
12042     struct cfs_rq *cfs_rq, *pos;
12043 
12044     rcu_read_lock();
12045     for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
12046         print_cfs_rq(m, cpu, cfs_rq);
12047     rcu_read_unlock();
12048 }
12049 
12050 #ifdef CONFIG_NUMA_BALANCING
12051 void show_numa_stats(struct task_struct *p, struct seq_file *m)
12052 {
12053     int node;
12054     unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
12055     struct numa_group *ng;
12056 
12057     rcu_read_lock();
12058     ng = rcu_dereference(p->numa_group);
12059     for_each_online_node(node) {
12060         if (p->numa_faults) {
12061             tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
12062             tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
12063         }
12064         if (ng) {
12065             gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
12066             gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
12067         }
12068         print_numa_stats(m, node, tsf, tpf, gsf, gpf);
12069     }
12070     rcu_read_unlock();
12071 }
12072 #endif /* CONFIG_NUMA_BALANCING */
12073 #endif /* CONFIG_SCHED_DEBUG */
12074 
12075 __init void init_sched_fair_class(void)
12076 {
12077 #ifdef CONFIG_SMP
12078     open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
12079 
12080 #ifdef CONFIG_NO_HZ_COMMON
12081     nohz.next_balance = jiffies;
12082     nohz.next_blocked = jiffies;
12083     zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
12084 #endif
12085 #endif /* SMP */
12086 
12087 }