0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_SCHED_H
0003 #define _LINUX_SCHED_H
0004 
0005 /*
0006  * Define 'struct task_struct' and provide the main scheduler
0007  * APIs (schedule(), wakeup variants, etc.)
0008  */
0009 
0010 #include <uapi/linux/sched.h>
0011 
0012 #include <asm/current.h>
0013 
0014 #include <linux/pid.h>
0015 #include <linux/sem.h>
0016 #include <linux/shm.h>
0017 #include <linux/mutex.h>
0018 #include <linux/plist.h>
0019 #include <linux/hrtimer.h>
0020 #include <linux/irqflags.h>
0021 #include <linux/seccomp.h>
0022 #include <linux/nodemask.h>
0023 #include <linux/rcupdate.h>
0024 #include <linux/refcount.h>
0025 #include <linux/resource.h>
0026 #include <linux/latencytop.h>
0027 #include <linux/sched/prio.h>
0028 #include <linux/sched/types.h>
0029 #include <linux/signal_types.h>
0030 #include <linux/syscall_user_dispatch.h>
0031 #include <linux/mm_types_task.h>
0032 #include <linux/task_io_accounting.h>
0033 #include <linux/posix-timers.h>
0034 #include <linux/rseq.h>
0035 #include <linux/seqlock.h>
0036 #include <linux/kcsan.h>
0037 #include <linux/rv.h>
0038 #include <asm/kmap_size.h>
0039 
0040 /* task_struct member predeclarations (sorted alphabetically): */
0041 struct audit_context;
0042 struct backing_dev_info;
0043 struct bio_list;
0044 struct blk_plug;
0045 struct bpf_local_storage;
0046 struct bpf_run_ctx;
0047 struct capture_control;
0048 struct cfs_rq;
0049 struct fs_struct;
0050 struct futex_pi_state;
0051 struct io_context;
0052 struct io_uring_task;
0053 struct mempolicy;
0054 struct nameidata;
0055 struct nsproxy;
0056 struct perf_event_context;
0057 struct pid_namespace;
0058 struct pipe_inode_info;
0059 struct rcu_node;
0060 struct reclaim_state;
0061 struct robust_list_head;
0062 struct root_domain;
0063 struct rq;
0064 struct sched_attr;
0065 struct sched_param;
0066 struct seq_file;
0067 struct sighand_struct;
0068 struct signal_struct;
0069 struct task_delay_info;
0070 struct task_group;
0071 
0072 /*
0073  * Task state bitmask. NOTE! These bits are also
0074  * encoded in fs/proc/array.c: get_task_state().
0075  *
0076  * We have two separate sets of flags: task->state
0077  * is about runnability, while task->exit_state is
0078  * about the task exiting. Confusing, but this way
0079  * modifying one set can't modify the other one by
0080  * mistake.
0081  */
0082 
0083 /* Used in tsk->state: */
0084 #define TASK_RUNNING            0x0000
0085 #define TASK_INTERRUPTIBLE      0x0001
0086 #define TASK_UNINTERRUPTIBLE        0x0002
0087 #define __TASK_STOPPED          0x0004
0088 #define __TASK_TRACED           0x0008
0089 /* Used in tsk->exit_state: */
0090 #define EXIT_DEAD           0x0010
0091 #define EXIT_ZOMBIE         0x0020
0092 #define EXIT_TRACE          (EXIT_ZOMBIE | EXIT_DEAD)
0093 /* Used in tsk->state again: */
0094 #define TASK_PARKED         0x0040
0095 #define TASK_DEAD           0x0080
0096 #define TASK_WAKEKILL           0x0100
0097 #define TASK_WAKING         0x0200
0098 #define TASK_NOLOAD         0x0400
0099 #define TASK_NEW            0x0800
0100 /* RT specific auxiliary flag to mark RT lock waiters */
0101 #define TASK_RTLOCK_WAIT        0x1000
0102 #define TASK_STATE_MAX          0x2000
0103 
0104 /* Convenience macros for the sake of set_current_state: */
0105 #define TASK_KILLABLE           (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
0106 #define TASK_STOPPED            (TASK_WAKEKILL | __TASK_STOPPED)
0107 #define TASK_TRACED         __TASK_TRACED
0108 
0109 #define TASK_IDLE           (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
0110 
0111 /* Convenience macros for the sake of wake_up(): */
0112 #define TASK_NORMAL         (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
0113 
0114 /* get_task_state(): */
0115 #define TASK_REPORT         (TASK_RUNNING | TASK_INTERRUPTIBLE | \
0116                      TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
0117                      __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
0118                      TASK_PARKED)
0119 
0120 #define task_is_running(task)       (READ_ONCE((task)->__state) == TASK_RUNNING)
0121 
0122 #define task_is_traced(task)        ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
0123 #define task_is_stopped(task)       ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
0124 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
0125 
0126 /*
0127  * Special states are those that do not use the normal wait-loop pattern. See
0128  * the comment with set_special_state().
0129  */
0130 #define is_special_task_state(state)                \
0131     ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
0132 
0133 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
0134 # define debug_normal_state_change(state_value)             \
0135     do {                                \
0136         WARN_ON_ONCE(is_special_task_state(state_value));   \
0137         current->task_state_change = _THIS_IP_;         \
0138     } while (0)
0139 
0140 # define debug_special_state_change(state_value)            \
0141     do {                                \
0142         WARN_ON_ONCE(!is_special_task_state(state_value));  \
0143         current->task_state_change = _THIS_IP_;         \
0144     } while (0)
0145 
0146 # define debug_rtlock_wait_set_state()                  \
0147     do {                                 \
0148         current->saved_state_change = current->task_state_change;\
0149         current->task_state_change = _THIS_IP_;          \
0150     } while (0)
0151 
0152 # define debug_rtlock_wait_restore_state()              \
0153     do {                                 \
0154         current->task_state_change = current->saved_state_change;\
0155     } while (0)
0156 
0157 #else
0158 # define debug_normal_state_change(cond)    do { } while (0)
0159 # define debug_special_state_change(cond)   do { } while (0)
0160 # define debug_rtlock_wait_set_state()      do { } while (0)
0161 # define debug_rtlock_wait_restore_state()  do { } while (0)
0162 #endif
0163 
0164 /*
0165  * set_current_state() includes a barrier so that the write of current->state
0166  * is correctly serialised wrt the caller's subsequent test of whether to
0167  * actually sleep:
0168  *
0169  *   for (;;) {
0170  *  set_current_state(TASK_UNINTERRUPTIBLE);
0171  *  if (CONDITION)
0172  *     break;
0173  *
0174  *  schedule();
0175  *   }
0176  *   __set_current_state(TASK_RUNNING);
0177  *
0178  * If the caller does not need such serialisation (because, for instance, the
0179  * CONDITION test and condition change and wakeup are under the same lock) then
0180  * use __set_current_state().
0181  *
0182  * The above is typically ordered against the wakeup, which does:
0183  *
0184  *   CONDITION = 1;
0185  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
0186  *
0187  * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
0188  * accessing p->state.
0189  *
0190  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
0191  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
0192  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
0193  *
0194  * However, with slightly different timing the wakeup TASK_RUNNING store can
0195  * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
0196  * a problem either because that will result in one extra go around the loop
0197  * and our @cond test will save the day.
0198  *
0199  * Also see the comments of try_to_wake_up().
0200  */
0201 #define __set_current_state(state_value)                \
0202     do {                                \
0203         debug_normal_state_change((state_value));       \
0204         WRITE_ONCE(current->__state, (state_value));        \
0205     } while (0)
0206 
0207 #define set_current_state(state_value)                  \
0208     do {                                \
0209         debug_normal_state_change((state_value));       \
0210         smp_store_mb(current->__state, (state_value));      \
0211     } while (0)
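
/*
 * Illustrative sketch (not part of the upstream header): the wait-loop
 * pattern documented above, assuming a hypothetical boolean condition that
 * some other context sets to true before calling wake_up_process() on the
 * sleeping task.
 */
static inline void example_wait_for_condition(bool *cond)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*cond))	/* test the condition after the state store */
			break;
		schedule();		/* sleep until a wakeup arrives */
	}
	__set_current_state(TASK_RUNNING);
}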
0212 
0213 /*
0214  * set_special_state() should be used for those states when the blocking task
0215  * can not use the regular condition based wait-loop. In that case we must
0216  * serialize against wakeups such that any possible in-flight TASK_RUNNING
0217  * stores will not collide with our state change.
0218  */
0219 #define set_special_state(state_value)                  \
0220     do {                                \
0221         unsigned long flags; /* may shadow */           \
0222                                     \
0223         raw_spin_lock_irqsave(&current->pi_lock, flags);    \
0224         debug_special_state_change((state_value));      \
0225         WRITE_ONCE(current->__state, (state_value));        \
0226         raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
0227     } while (0)
0228 
0229 /*
0230  * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
0231  *
0232  * RT's spin/rwlock substitutions are state preserving. The state of the
0233  * task when blocking on the lock is saved in task_struct::saved_state and
0234  * restored after the lock has been acquired.  These operations are
0235  * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
0236  * lock related wakeups while the task is blocked on the lock are
0237  * redirected to operate on task_struct::saved_state to ensure that these
0238  * are not dropped. On restore task_struct::saved_state is set to
0239  * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
0240  *
0241  * The lock operation looks like this:
0242  *
0243  *  current_save_and_set_rtlock_wait_state();
0244  *  for (;;) {
0245  *      if (try_lock())
0246  *          break;
0247  *      raw_spin_unlock_irq(&lock->wait_lock);
0248  *      schedule_rtlock();
0249  *      raw_spin_lock_irq(&lock->wait_lock);
0250  *      set_current_state(TASK_RTLOCK_WAIT);
0251  *  }
0252  *  current_restore_rtlock_saved_state();
0253  */
0254 #define current_save_and_set_rtlock_wait_state()            \
0255     do {                                \
0256         lockdep_assert_irqs_disabled();             \
0257         raw_spin_lock(&current->pi_lock);           \
0258         current->saved_state = current->__state;        \
0259         debug_rtlock_wait_set_state();              \
0260         WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);     \
0261         raw_spin_unlock(&current->pi_lock);         \
0262     } while (0);
0263 
0264 #define current_restore_rtlock_saved_state()                \
0265     do {                                \
0266         lockdep_assert_irqs_disabled();             \
0267         raw_spin_lock(&current->pi_lock);           \
0268         debug_rtlock_wait_restore_state();          \
0269         WRITE_ONCE(current->__state, current->saved_state); \
0270         current->saved_state = TASK_RUNNING;            \
0271         raw_spin_unlock(&current->pi_lock);         \
0272     } while (0);
0273 
0274 #define get_current_state() READ_ONCE(current->__state)
0275 
0276 /*
0277  * Define the task command name length as enum, then it can be visible to
0278  * BPF programs.
0279  */
0280 enum {
0281     TASK_COMM_LEN = 16,
0282 };
0283 
0284 extern void scheduler_tick(void);
0285 
0286 #define MAX_SCHEDULE_TIMEOUT        LONG_MAX
0287 
0288 extern long schedule_timeout(long timeout);
0289 extern long schedule_timeout_interruptible(long timeout);
0290 extern long schedule_timeout_killable(long timeout);
0291 extern long schedule_timeout_uninterruptible(long timeout);
0292 extern long schedule_timeout_idle(long timeout);
0293 asmlinkage void schedule(void);
0294 extern void schedule_preempt_disabled(void);
0295 asmlinkage void preempt_schedule_irq(void);
0296 #ifdef CONFIG_PREEMPT_RT
0297  extern void schedule_rtlock(void);
0298 #endif
0299 
0300 extern int __must_check io_schedule_prepare(void);
0301 extern void io_schedule_finish(int token);
0302 extern long io_schedule_timeout(long timeout);
0303 extern void io_schedule(void);
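
/*
 * Illustrative sketch (not part of the upstream header): bounded sleeps with
 * the timeout API above. Plain schedule_timeout() expects the caller to have
 * set the task state first; the _interruptible/_killable/_uninterruptible
 * wrappers set it on the caller's behalf. The return value is the number of
 * jiffies left if the sleep ended early, 0 otherwise.
 */
static inline bool example_sleep_was_interrupted(unsigned long timeout_jiffies)
{
	/* Sleep for up to timeout_jiffies, but let signals cut the sleep short: */
	return schedule_timeout_interruptible(timeout_jiffies) != 0;
}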
0304 
0305 /**
0306  * struct prev_cputime - snapshot of system and user cputime
0307  * @utime: time spent in user mode
0308  * @stime: time spent in system mode
0309  * @lock: protects the above two fields
0310  *
0311  * Stores previous user/system time values such that we can guarantee
0312  * monotonicity.
0313  */
0314 struct prev_cputime {
0315 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
0316     u64             utime;
0317     u64             stime;
0318     raw_spinlock_t          lock;
0319 #endif
0320 };
0321 
0322 enum vtime_state {
0323     /* Task is sleeping or running in a CPU with VTIME inactive: */
0324     VTIME_INACTIVE = 0,
0325     /* Task is idle */
0326     VTIME_IDLE,
0327     /* Task runs in kernelspace in a CPU with VTIME active: */
0328     VTIME_SYS,
0329     /* Task runs in userspace in a CPU with VTIME active: */
0330     VTIME_USER,
0331     /* Task runs as a guest in a CPU with VTIME active: */
0332     VTIME_GUEST,
0333 };
0334 
0335 struct vtime {
0336     seqcount_t      seqcount;
0337     unsigned long long  starttime;
0338     enum vtime_state    state;
0339     unsigned int        cpu;
0340     u64         utime;
0341     u64         stime;
0342     u64         gtime;
0343 };
0344 
0345 /*
0346  * Utilization clamp constraints.
0347  * @UCLAMP_MIN: Minimum utilization
0348  * @UCLAMP_MAX: Maximum utilization
0349  * @UCLAMP_CNT: Utilization clamp constraints count
0350  */
0351 enum uclamp_id {
0352     UCLAMP_MIN = 0,
0353     UCLAMP_MAX,
0354     UCLAMP_CNT
0355 };
0356 
0357 #ifdef CONFIG_SMP
0358 extern struct root_domain def_root_domain;
0359 extern struct mutex sched_domains_mutex;
0360 #endif
0361 
0362 struct sched_info {
0363 #ifdef CONFIG_SCHED_INFO
0364     /* Cumulative counters: */
0365 
0366     /* # of times we have run on this CPU: */
0367     unsigned long           pcount;
0368 
0369     /* Time spent waiting on a runqueue: */
0370     unsigned long long      run_delay;
0371 
0372     /* Timestamps: */
0373 
0374     /* When did we last run on a CPU? */
0375     unsigned long long      last_arrival;
0376 
0377     /* When were we last queued to run? */
0378     unsigned long long      last_queued;
0379 
0380 #endif /* CONFIG_SCHED_INFO */
0381 };
0382 
0383 /*
0384  * Integer metrics need fixed point arithmetic, e.g., sched/fair
0385  * has a few: load, load_avg, util_avg, freq, and capacity.
0386  *
0387  * We define a basic fixed point arithmetic range, and then formalize
0388  * all these metrics based on that basic range.
0389  */
0390 # define SCHED_FIXEDPOINT_SHIFT     10
0391 # define SCHED_FIXEDPOINT_SCALE     (1L << SCHED_FIXEDPOINT_SHIFT)
0392 
0393 /* Increase resolution of cpu_capacity calculations */
0394 # define SCHED_CAPACITY_SHIFT       SCHED_FIXEDPOINT_SHIFT
0395 # define SCHED_CAPACITY_SCALE       (1L << SCHED_CAPACITY_SHIFT)
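
/*
 * Worked example of the fixed point range above (illustrative only): with
 * SCHED_FIXEDPOINT_SHIFT == 10 the scale is 1024, so a ratio of 1.0 is
 * represented as 1024 and 50% of CPU capacity is represented as 512.
 */
static inline unsigned long example_capacity_to_percent(unsigned long cap)
{
	/* 512 -> 50, 1024 -> 100 */
	return (cap * 100) >> SCHED_CAPACITY_SHIFT;
}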
0396 
0397 struct load_weight {
0398     unsigned long           weight;
0399     u32             inv_weight;
0400 };
0401 
0402 /**
0403  * struct util_est - Estimated utilization of FAIR tasks
0404  * @enqueued: instantaneous estimated utilization of a task/cpu
0405  * @ewma:     the Exponential Weighted Moving Average (EWMA)
0406  *            utilization of a task
0407  *
0408  * Support data structure to track an Exponential Weighted Moving Average
0409  * (EWMA) of a FAIR task's utilization. New samples are added to the moving
0410  * average each time a task completes an activation. The sample weight is chosen
0411  * so that the EWMA will be relatively insensitive to transient changes to the
0412  * task's workload.
0413  *
0414  * The enqueued attribute has a slightly different meaning for tasks and cpus:
0415  * - task:   the task's util_avg at last task dequeue time
0416  * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
0417  * Thus, the util_est.enqueued of a task represents the contribution to the
0418  * estimated utilization of the CPU where that task is currently enqueued.
0419  *
0420  * Only for tasks do we track a moving average of the past instantaneous
0421  * estimated utilization. This allows the average to absorb sporadic drops in
0422  * the utilization of an otherwise almost periodic task.
0423  *
0424  * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
0425  * updates. When a task is dequeued, its util_est should not be updated if its
0426  * util_avg has not been updated in the meantime.
0427  * This information is mapped into the MSB bit of util_est.enqueued at dequeue
0428  * time. Since the max value of util_est.enqueued for a task is 1024 (the PELT
0429  * util_avg of a task), it is safe to use the MSB.
0430  */
0431 struct util_est {
0432     unsigned int            enqueued;
0433     unsigned int            ewma;
0434 #define UTIL_EST_WEIGHT_SHIFT       2
0435 #define UTIL_AVG_UNCHANGED      0x80000000
0436 } __attribute__((__aligned__(sizeof(u64))));
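
/*
 * Simplified sketch (illustrative only) of the EWMA described above: with
 * UTIL_EST_WEIGHT_SHIFT == 2 each new sample contributes roughly 1/4 of the
 * new average. The real update lives in kernel/sched/fair.c and additionally
 * deals with the UTIL_AVG_UNCHANGED bit stored in the MSB of @enqueued.
 */
static inline unsigned int example_util_est_ewma(unsigned int ewma,
						 unsigned int enqueued)
{
	/* ewma := 3/4 * ewma + 1/4 * sample */
	return ewma - (ewma >> UTIL_EST_WEIGHT_SHIFT) +
	       (enqueued >> UTIL_EST_WEIGHT_SHIFT);
}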
0437 
0438 /*
0439  * The load/runnable/util_avg accumulates an infinite geometric series
0440  * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
0441  *
0442  * [load_avg definition]
0443  *
0444  *   load_avg = runnable% * scale_load_down(load)
0445  *
0446  * [runnable_avg definition]
0447  *
0448  *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
0449  *
0450  * [util_avg definition]
0451  *
0452  *   util_avg = running% * SCHED_CAPACITY_SCALE
0453  *
0454  * where runnable% is the time ratio that a sched_entity is runnable and
0455  * running% the time ratio that a sched_entity is running.
0456  *
0457  * For cfs_rq, they are the aggregated values of all runnable and blocked
0458  * sched_entities.
0459  *
0460  * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
0461  * capacity scaling. The scaling is done through the rq_clock_pelt that is used
0462  * for computing those signals (see update_rq_clock_pelt())
0463  *
0464  * N.B., the above ratios (runnable% and running%) themselves are in the
0465  * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
0466  * to as large a range as necessary. This is for example reflected by
0467  * util_avg's SCHED_CAPACITY_SCALE.
0468  *
0469  * [Overflow issue]
0470  *
0471  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
0472  * with the highest load (=88761), always runnable on a single cfs_rq,
0473  * and should not overflow as the number already hits PID_MAX_LIMIT.
0474  *
0475  * For all other cases (including 32-bit kernels), struct load_weight's
0476  * weight will overflow first before we do, because:
0477  *
0478  *    Max(load_avg) <= Max(load.weight)
0479  *
0480  * Then it is the load_weight's responsibility to consider overflow
0481  * issues.
0482  */
0483 struct sched_avg {
0484     u64             last_update_time;
0485     u64             load_sum;
0486     u64             runnable_sum;
0487     u32             util_sum;
0488     u32             period_contrib;
0489     unsigned long           load_avg;
0490     unsigned long           runnable_avg;
0491     unsigned long           util_avg;
0492     struct util_est         util_est;
0493 } ____cacheline_aligned;
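
/*
 * Worked example of the definitions above (illustrative only): a sched_entity
 * that is runnable 50% of the time, actually running 25% of the time, and
 * whose scale_load_down(load) is 1024 would settle at roughly:
 *
 *   load_avg     = 0.50 * 1024                  = 512
 *   runnable_avg = 0.50 * SCHED_CAPACITY_SCALE  = 512
 *   util_avg     = 0.25 * SCHED_CAPACITY_SCALE  = 256
 */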
0494 
0495 struct sched_statistics {
0496 #ifdef CONFIG_SCHEDSTATS
0497     u64             wait_start;
0498     u64             wait_max;
0499     u64             wait_count;
0500     u64             wait_sum;
0501     u64             iowait_count;
0502     u64             iowait_sum;
0503 
0504     u64             sleep_start;
0505     u64             sleep_max;
0506     s64             sum_sleep_runtime;
0507 
0508     u64             block_start;
0509     u64             block_max;
0510     s64             sum_block_runtime;
0511 
0512     u64             exec_max;
0513     u64             slice_max;
0514 
0515     u64             nr_migrations_cold;
0516     u64             nr_failed_migrations_affine;
0517     u64             nr_failed_migrations_running;
0518     u64             nr_failed_migrations_hot;
0519     u64             nr_forced_migrations;
0520 
0521     u64             nr_wakeups;
0522     u64             nr_wakeups_sync;
0523     u64             nr_wakeups_migrate;
0524     u64             nr_wakeups_local;
0525     u64             nr_wakeups_remote;
0526     u64             nr_wakeups_affine;
0527     u64             nr_wakeups_affine_attempts;
0528     u64             nr_wakeups_passive;
0529     u64             nr_wakeups_idle;
0530 
0531 #ifdef CONFIG_SCHED_CORE
0532     u64             core_forceidle_sum;
0533 #endif
0534 #endif /* CONFIG_SCHEDSTATS */
0535 } ____cacheline_aligned;
0536 
0537 struct sched_entity {
0538     /* For load-balancing: */
0539     struct load_weight      load;
0540     struct rb_node          run_node;
0541     struct list_head        group_node;
0542     unsigned int            on_rq;
0543 
0544     u64             exec_start;
0545     u64             sum_exec_runtime;
0546     u64             vruntime;
0547     u64             prev_sum_exec_runtime;
0548 
0549     u64             nr_migrations;
0550 
0551 #ifdef CONFIG_FAIR_GROUP_SCHED
0552     int             depth;
0553     struct sched_entity     *parent;
0554     /* rq on which this entity is (to be) queued: */
0555     struct cfs_rq           *cfs_rq;
0556     /* rq "owned" by this entity/group: */
0557     struct cfs_rq           *my_q;
0558     /* cached value of my_q->h_nr_running */
0559     unsigned long           runnable_weight;
0560 #endif
0561 
0562 #ifdef CONFIG_SMP
0563     /*
0564      * Per entity load average tracking.
0565      *
0566      * Put into separate cache line so it does not
0567      * collide with read-mostly values above.
0568      */
0569     struct sched_avg        avg;
0570 #endif
0571 };
0572 
0573 struct sched_rt_entity {
0574     struct list_head        run_list;
0575     unsigned long           timeout;
0576     unsigned long           watchdog_stamp;
0577     unsigned int            time_slice;
0578     unsigned short          on_rq;
0579     unsigned short          on_list;
0580 
0581     struct sched_rt_entity      *back;
0582 #ifdef CONFIG_RT_GROUP_SCHED
0583     struct sched_rt_entity      *parent;
0584     /* rq on which this entity is (to be) queued: */
0585     struct rt_rq            *rt_rq;
0586     /* rq "owned" by this entity/group: */
0587     struct rt_rq            *my_q;
0588 #endif
0589 } __randomize_layout;
0590 
0591 struct sched_dl_entity {
0592     struct rb_node          rb_node;
0593 
0594     /*
0595      * Original scheduling parameters. Copied here from sched_attr
0596      * during sched_setattr(), they will remain the same until
0597      * the next sched_setattr().
0598      */
0599     u64             dl_runtime; /* Maximum runtime for each instance    */
0600     u64             dl_deadline;    /* Relative deadline of each instance   */
0601     u64             dl_period;  /* Separation of two instances (period) */
0602     u64             dl_bw;      /* dl_runtime / dl_period       */
0603     u64             dl_density; /* dl_runtime / dl_deadline     */
0604 
0605     /*
0606      * Actual scheduling parameters. Initialized with the values above,
0607      * they are continuously updated during task execution. Note that
0608      * the remaining runtime could be < 0 in case we are in overrun.
0609      */
0610     s64             runtime;    /* Remaining runtime for this instance  */
0611     u64             deadline;   /* Absolute deadline for this instance  */
0612     unsigned int            flags;      /* Specifying the scheduler behaviour   */
0613 
0614     /*
0615      * Some bool flags:
0616      *
0617      * @dl_throttled tells if we exhausted the runtime. If so, the
0618      * task has to wait for a replenishment to be performed at the
0619      * next firing of dl_timer.
0620      *
0621      * @dl_yielded tells if task gave up the CPU before consuming
0622      * all its available runtime during the last job.
0623      *
0624      * @dl_non_contending tells if the task is inactive while still
0625      * contributing to the active utilization. In other words, it
0626      * indicates if the inactive timer has been armed and its handler
0627      * has not been executed yet. This flag is useful to avoid race
0628      * conditions between the inactive timer handler and the wakeup
0629      * code.
0630      *
0631      * @dl_overrun tells if the task asked to be informed about runtime
0632      * overruns.
0633      */
0634     unsigned int            dl_throttled      : 1;
0635     unsigned int            dl_yielded        : 1;
0636     unsigned int            dl_non_contending : 1;
0637     unsigned int            dl_overrun    : 1;
0638 
0639     /*
0640      * Bandwidth enforcement timer. Each -deadline task has its
0641      * own bandwidth to be enforced, thus we need one timer per task.
0642      */
0643     struct hrtimer          dl_timer;
0644 
0645     /*
0646      * Inactive timer, responsible for decreasing the active utilization
0647      * at the "0-lag time". When a -deadline task blocks, it contributes
0648      * to GRUB's active utilization until the "0-lag time", hence a
0649      * timer is needed to decrease the active utilization at the correct
0650      * time.
0651      */
0652     struct hrtimer inactive_timer;
0653 
0654 #ifdef CONFIG_RT_MUTEXES
0655     /*
0656      * Priority Inheritance. When a DEADLINE scheduling entity is boosted,
0657      * pi_se points to the donor; otherwise it points to the dl_se it belongs
0658      * to (the original one, i.e. itself).
0659      */
0660     struct sched_dl_entity *pi_se;
0661 #endif
0662 };
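
/*
 * Worked example of the parameters above (illustrative only): a -deadline
 * task configured via sched_setattr() with
 *
 *   dl_runtime  =  10 ms
 *   dl_deadline =  30 ms
 *   dl_period   = 100 ms
 *
 * may consume up to 10 ms of CPU time in every 100 ms period and must
 * receive that runtime within 30 ms of each period start. Its bandwidth
 * (dl_runtime / dl_period) is therefore 10%, and its density
 * (dl_runtime / dl_deadline) is 1/3; the kernel stores both as fixed-point
 * ratios.
 */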
0663 
0664 #ifdef CONFIG_UCLAMP_TASK
0665 /* Number of utilization clamp buckets (shorter alias) */
0666 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
0667 
0668 /*
0669  * Utilization clamp for a scheduling entity
0670  * @value:      clamp value "assigned" to a se
0671  * @bucket_id:      bucket index corresponding to the "assigned" value
0672  * @active:     the se is currently refcounted in a rq's bucket
0673  * @user_defined:   the requested clamp value comes from user-space
0674  *
0675  * The bucket_id is the index of the clamp bucket matching the clamp value
0676  * which is pre-computed and stored to avoid expensive integer divisions from
0677  * the fast path.
0678  *
0679  * The active bit is set whenever a task has an "effective" value assigned,
0680  * which can be different from the clamp value "requested" from user-space.
0681  * This makes it possible to know that a task is refcounted in the rq's bucket
0682  * corresponding to the "effective" bucket_id.
0683  *
0684  * The user_defined bit is set whenever a task has a task-specific clamp
0685  * value requested from userspace, i.e. the system defaults apply to this task
0686  * only as a restriction. This allows default clamps to be relaxed when a less
0687  * restrictive task-specific value has been requested, thus implementing a
0688  * "nice" semantic. For example, a task running with a 20% default boost can
0689  * still drop its own boosting to 0%.
0690  */
0691 struct uclamp_se {
0692     unsigned int value      : bits_per(SCHED_CAPACITY_SCALE);
0693     unsigned int bucket_id      : bits_per(UCLAMP_BUCKETS);
0694     unsigned int active     : 1;
0695     unsigned int user_defined   : 1;
0696 };
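
/*
 * Illustrative sketch (not part of the upstream header) of the value ->
 * bucket_id mapping described above: the clamp space [0..SCHED_CAPACITY_SCALE]
 * is split into UCLAMP_BUCKETS roughly equal buckets, and the index is a
 * simple division that the scheduler precomputes so the fast path never has
 * to divide.
 */
static inline unsigned int example_uclamp_bucket_id(unsigned int clamp_value)
{
	unsigned int delta = SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS;
	unsigned int max_id = UCLAMP_BUCKETS - 1;

	return min(clamp_value / delta, max_id);
}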
0697 #endif /* CONFIG_UCLAMP_TASK */
0698 
0699 union rcu_special {
0700     struct {
0701         u8          blocked;
0702         u8          need_qs;
0703         u8          exp_hint; /* Hint for performance. */
0704         u8          need_mb; /* Readers need smp_mb(). */
0705     } b; /* Bits. */
0706     u32 s; /* Set of bits. */
0707 };
0708 
0709 enum perf_event_task_context {
0710     perf_invalid_context = -1,
0711     perf_hw_context = 0,
0712     perf_sw_context,
0713     perf_nr_task_contexts,
0714 };
0715 
0716 struct wake_q_node {
0717     struct wake_q_node *next;
0718 };
0719 
0720 struct kmap_ctrl {
0721 #ifdef CONFIG_KMAP_LOCAL
0722     int             idx;
0723     pte_t               pteval[KM_MAX_IDX];
0724 #endif
0725 };
0726 
0727 struct task_struct {
0728 #ifdef CONFIG_THREAD_INFO_IN_TASK
0729     /*
0730      * For reasons of header soup (see current_thread_info()), this
0731      * must be the first element of task_struct.
0732      */
0733     struct thread_info      thread_info;
0734 #endif
0735     unsigned int            __state;
0736 
0737 #ifdef CONFIG_PREEMPT_RT
0738     /* saved state for "spinlock sleepers" */
0739     unsigned int            saved_state;
0740 #endif
0741 
0742     /*
0743      * This begins the randomizable portion of task_struct. Only
0744      * scheduling-critical items should be added above here.
0745      */
0746     randomized_struct_fields_start
0747 
0748     void                *stack;
0749     refcount_t          usage;
0750     /* Per task flags (PF_*), defined further below: */
0751     unsigned int            flags;
0752     unsigned int            ptrace;
0753 
0754 #ifdef CONFIG_SMP
0755     int             on_cpu;
0756     struct __call_single_node   wake_entry;
0757     unsigned int            wakee_flips;
0758     unsigned long           wakee_flip_decay_ts;
0759     struct task_struct      *last_wakee;
0760 
0761     /*
0762      * recent_used_cpu is initially set to the last CPU used by a task
0763      * that performs an affine wakeup of another task. Waker/wakee
0764      * relationships can push tasks around CPUs, with each wakeup moving
0765      * a task to the next one. Tracking a recently used CPU allows a
0766      * quick search for a recently used CPU that may be idle.
0767      */
0768     int             recent_used_cpu;
0769     int             wake_cpu;
0770 #endif
0771     int             on_rq;
0772 
0773     int             prio;
0774     int             static_prio;
0775     int             normal_prio;
0776     unsigned int            rt_priority;
0777 
0778     struct sched_entity     se;
0779     struct sched_rt_entity      rt;
0780     struct sched_dl_entity      dl;
0781     const struct sched_class    *sched_class;
0782 
0783 #ifdef CONFIG_SCHED_CORE
0784     struct rb_node          core_node;
0785     unsigned long           core_cookie;
0786     unsigned int            core_occupation;
0787 #endif
0788 
0789 #ifdef CONFIG_CGROUP_SCHED
0790     struct task_group       *sched_task_group;
0791 #endif
0792 
0793 #ifdef CONFIG_UCLAMP_TASK
0794     /*
0795      * Clamp values requested for a scheduling entity.
0796      * Must be updated with task_rq_lock() held.
0797      */
0798     struct uclamp_se        uclamp_req[UCLAMP_CNT];
0799     /*
0800      * Effective clamp values used for a scheduling entity.
0801      * Must be updated with task_rq_lock() held.
0802      */
0803     struct uclamp_se        uclamp[UCLAMP_CNT];
0804 #endif
0805 
0806     struct sched_statistics         stats;
0807 
0808 #ifdef CONFIG_PREEMPT_NOTIFIERS
0809     /* List of struct preempt_notifier: */
0810     struct hlist_head       preempt_notifiers;
0811 #endif
0812 
0813 #ifdef CONFIG_BLK_DEV_IO_TRACE
0814     unsigned int            btrace_seq;
0815 #endif
0816 
0817     unsigned int            policy;
0818     int             nr_cpus_allowed;
0819     const cpumask_t         *cpus_ptr;
0820     cpumask_t           *user_cpus_ptr;
0821     cpumask_t           cpus_mask;
0822     void                *migration_pending;
0823 #ifdef CONFIG_SMP
0824     unsigned short          migration_disabled;
0825 #endif
0826     unsigned short          migration_flags;
0827 
0828 #ifdef CONFIG_PREEMPT_RCU
0829     int             rcu_read_lock_nesting;
0830     union rcu_special       rcu_read_unlock_special;
0831     struct list_head        rcu_node_entry;
0832     struct rcu_node         *rcu_blocked_node;
0833 #endif /* #ifdef CONFIG_PREEMPT_RCU */
0834 
0835 #ifdef CONFIG_TASKS_RCU
0836     unsigned long           rcu_tasks_nvcsw;
0837     u8              rcu_tasks_holdout;
0838     u8              rcu_tasks_idx;
0839     int             rcu_tasks_idle_cpu;
0840     struct list_head        rcu_tasks_holdout_list;
0841 #endif /* #ifdef CONFIG_TASKS_RCU */
0842 
0843 #ifdef CONFIG_TASKS_TRACE_RCU
0844     int             trc_reader_nesting;
0845     int             trc_ipi_to_cpu;
0846     union rcu_special       trc_reader_special;
0847     struct list_head        trc_holdout_list;
0848     struct list_head        trc_blkd_node;
0849     int             trc_blkd_cpu;
0850 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
0851 
0852     struct sched_info       sched_info;
0853 
0854     struct list_head        tasks;
0855 #ifdef CONFIG_SMP
0856     struct plist_node       pushable_tasks;
0857     struct rb_node          pushable_dl_tasks;
0858 #endif
0859 
0860     struct mm_struct        *mm;
0861     struct mm_struct        *active_mm;
0862 
0863     /* Per-thread vma caching: */
0864     struct vmacache         vmacache;
0865 
0866 #ifdef SPLIT_RSS_COUNTING
0867     struct task_rss_stat        rss_stat;
0868 #endif
0869     int             exit_state;
0870     int             exit_code;
0871     int             exit_signal;
0872     /* The signal sent when the parent dies: */
0873     int             pdeath_signal;
0874     /* JOBCTL_*, siglock protected: */
0875     unsigned long           jobctl;
0876 
0877     /* Used for emulating ABI behavior of previous Linux versions: */
0878     unsigned int            personality;
0879 
0880     /* Scheduler bits, serialized by scheduler locks: */
0881     unsigned            sched_reset_on_fork:1;
0882     unsigned            sched_contributes_to_load:1;
0883     unsigned            sched_migrated:1;
0884 #ifdef CONFIG_PSI
0885     unsigned            sched_psi_wake_requeue:1;
0886 #endif
0887 
0888     /* Force alignment to the next boundary: */
0889     unsigned            :0;
0890 
0891     /* Unserialized, strictly 'current' */
0892 
0893     /*
0894      * This field must not be in the scheduler word above due to wakelist
0895      * queueing no longer being serialized by p->on_cpu. However:
0896      *
0897      * p->XXX = X;          ttwu()
0898      * schedule()             if (p->on_rq && ..) // false
0899      *   smp_mb__after_spinlock();    if (smp_load_acquire(&p->on_cpu) && //true
0900      *   deactivate_task()            ttwu_queue_wakelist())
0901      *     p->on_rq = 0;            p->sched_remote_wakeup = Y;
0902      *
0903      * guarantees all stores of 'current' are visible before
0904      * ->sched_remote_wakeup gets used, so it can be in this word.
0905      */
0906     unsigned            sched_remote_wakeup:1;
0907 
0908     /* Bit to tell LSMs we're in execve(): */
0909     unsigned            in_execve:1;
0910     unsigned            in_iowait:1;
0911 #ifndef TIF_RESTORE_SIGMASK
0912     unsigned            restore_sigmask:1;
0913 #endif
0914 #ifdef CONFIG_MEMCG
0915     unsigned            in_user_fault:1;
0916 #endif
0917 #ifdef CONFIG_COMPAT_BRK
0918     unsigned            brk_randomized:1;
0919 #endif
0920 #ifdef CONFIG_CGROUPS
0921     /* disallow userland-initiated cgroup migration */
0922     unsigned            no_cgroup_migration:1;
0923     /* task is frozen/stopped (used by the cgroup freezer) */
0924     unsigned            frozen:1;
0925 #endif
0926 #ifdef CONFIG_BLK_CGROUP
0927     unsigned            use_memdelay:1;
0928 #endif
0929 #ifdef CONFIG_PSI
0930     /* Stalled due to lack of memory */
0931     unsigned            in_memstall:1;
0932 #endif
0933 #ifdef CONFIG_PAGE_OWNER
0934     /* Used by page_owner=on to detect recursion in page tracking. */
0935     unsigned            in_page_owner:1;
0936 #endif
0937 #ifdef CONFIG_EVENTFD
0938     /* Recursion prevention for eventfd_signal() */
0939     unsigned            in_eventfd_signal:1;
0940 #endif
0941 #ifdef CONFIG_IOMMU_SVA
0942     unsigned            pasid_activated:1;
0943 #endif
0944 #ifdef  CONFIG_CPU_SUP_INTEL
0945     unsigned            reported_split_lock:1;
0946 #endif
0947 
0948     unsigned long           atomic_flags; /* Flags requiring atomic access. */
0949 
0950     struct restart_block        restart_block;
0951 
0952     pid_t               pid;
0953     pid_t               tgid;
0954 
0955 #ifdef CONFIG_STACKPROTECTOR
0956     /* Canary value for the -fstack-protector GCC feature: */
0957     unsigned long           stack_canary;
0958 #endif
0959     /*
0960      * Pointers to the (original) parent process, youngest child, younger sibling,
0961      * older sibling, respectively.  (p->father can be replaced with
0962      * p->real_parent->pid)
0963      */
0964 
0965     /* Real parent process: */
0966     struct task_struct __rcu    *real_parent;
0967 
0968     /* Recipient of SIGCHLD, wait4() reports: */
0969     struct task_struct __rcu    *parent;
0970 
0971     /*
0972      * Children/sibling form the list of natural children:
0973      */
0974     struct list_head        children;
0975     struct list_head        sibling;
0976     struct task_struct      *group_leader;
0977 
0978     /*
0979      * 'ptraced' is the list of tasks this task is using ptrace() on.
0980      *
0981      * This includes both natural children and PTRACE_ATTACH targets.
0982      * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
0983      */
0984     struct list_head        ptraced;
0985     struct list_head        ptrace_entry;
0986 
0987     /* PID/PID hash table linkage. */
0988     struct pid          *thread_pid;
0989     struct hlist_node       pid_links[PIDTYPE_MAX];
0990     struct list_head        thread_group;
0991     struct list_head        thread_node;
0992 
0993     struct completion       *vfork_done;
0994 
0995     /* CLONE_CHILD_SETTID: */
0996     int __user          *set_child_tid;
0997 
0998     /* CLONE_CHILD_CLEARTID: */
0999     int __user          *clear_child_tid;
1000 
1001     /* PF_KTHREAD | PF_IO_WORKER */
1002     void                *worker_private;
1003 
1004     u64             utime;
1005     u64             stime;
1006 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1007     u64             utimescaled;
1008     u64             stimescaled;
1009 #endif
1010     u64             gtime;
1011     struct prev_cputime     prev_cputime;
1012 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1013     struct vtime            vtime;
1014 #endif
1015 
1016 #ifdef CONFIG_NO_HZ_FULL
1017     atomic_t            tick_dep_mask;
1018 #endif
1019     /* Context switch counts: */
1020     unsigned long           nvcsw;
1021     unsigned long           nivcsw;
1022 
1023     /* Monotonic time in nsecs: */
1024     u64             start_time;
1025 
1026     /* Boot based time in nsecs: */
1027     u64             start_boottime;
1028 
1029     /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1030     unsigned long           min_flt;
1031     unsigned long           maj_flt;
1032 
1033     /* Empty if CONFIG_POSIX_CPUTIMERS=n */
1034     struct posix_cputimers      posix_cputimers;
1035 
1036 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1037     struct posix_cputimers_work posix_cputimers_work;
1038 #endif
1039 
1040     /* Process credentials: */
1041 
1042     /* Tracer's credentials at attach: */
1043     const struct cred __rcu     *ptracer_cred;
1044 
1045     /* Objective and real subjective task credentials (COW): */
1046     const struct cred __rcu     *real_cred;
1047 
1048     /* Effective (overridable) subjective task credentials (COW): */
1049     const struct cred __rcu     *cred;
1050 
1051 #ifdef CONFIG_KEYS
1052     /* Cached requested key. */
1053     struct key          *cached_requested_key;
1054 #endif
1055 
1056     /*
1057      * executable name, excluding path.
1058      *
1059      * - normally initialized by setup_new_exec()
1060      * - access it with [gs]et_task_comm()
1061      * - lock it with task_lock()
1062      */
1063     char                comm[TASK_COMM_LEN];
1064 
1065     struct nameidata        *nameidata;
1066 
1067 #ifdef CONFIG_SYSVIPC
1068     struct sysv_sem         sysvsem;
1069     struct sysv_shm         sysvshm;
1070 #endif
1071 #ifdef CONFIG_DETECT_HUNG_TASK
1072     unsigned long           last_switch_count;
1073     unsigned long           last_switch_time;
1074 #endif
1075     /* Filesystem information: */
1076     struct fs_struct        *fs;
1077 
1078     /* Open file information: */
1079     struct files_struct     *files;
1080 
1081 #ifdef CONFIG_IO_URING
1082     struct io_uring_task        *io_uring;
1083 #endif
1084 
1085     /* Namespaces: */
1086     struct nsproxy          *nsproxy;
1087 
1088     /* Signal handlers: */
1089     struct signal_struct        *signal;
1090     struct sighand_struct __rcu     *sighand;
1091     sigset_t            blocked;
1092     sigset_t            real_blocked;
1093     /* Restored if set_restore_sigmask() was used: */
1094     sigset_t            saved_sigmask;
1095     struct sigpending       pending;
1096     unsigned long           sas_ss_sp;
1097     size_t              sas_ss_size;
1098     unsigned int            sas_ss_flags;
1099 
1100     struct callback_head        *task_works;
1101 
1102 #ifdef CONFIG_AUDIT
1103 #ifdef CONFIG_AUDITSYSCALL
1104     struct audit_context        *audit_context;
1105 #endif
1106     kuid_t              loginuid;
1107     unsigned int            sessionid;
1108 #endif
1109     struct seccomp          seccomp;
1110     struct syscall_user_dispatch    syscall_dispatch;
1111 
1112     /* Thread group tracking: */
1113     u64             parent_exec_id;
1114     u64             self_exec_id;
1115 
1116     /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1117     spinlock_t          alloc_lock;
1118 
1119     /* Protection of the PI data structures: */
1120     raw_spinlock_t          pi_lock;
1121 
1122     struct wake_q_node      wake_q;
1123 
1124 #ifdef CONFIG_RT_MUTEXES
1125     /* PI waiters blocked on a rt_mutex held by this task: */
1126     struct rb_root_cached       pi_waiters;
1127     /* Updated under owner's pi_lock and rq lock */
1128     struct task_struct      *pi_top_task;
1129     /* Deadlock detection and priority inheritance handling: */
1130     struct rt_mutex_waiter      *pi_blocked_on;
1131 #endif
1132 
1133 #ifdef CONFIG_DEBUG_MUTEXES
1134     /* Mutex deadlock detection: */
1135     struct mutex_waiter     *blocked_on;
1136 #endif
1137 
1138 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1139     int             non_block_count;
1140 #endif
1141 
1142 #ifdef CONFIG_TRACE_IRQFLAGS
1143     struct irqtrace_events      irqtrace;
1144     unsigned int            hardirq_threaded;
1145     u64             hardirq_chain_key;
1146     int             softirqs_enabled;
1147     int             softirq_context;
1148     int             irq_config;
1149 #endif
1150 #ifdef CONFIG_PREEMPT_RT
1151     int             softirq_disable_cnt;
1152 #endif
1153 
1154 #ifdef CONFIG_LOCKDEP
1155 # define MAX_LOCK_DEPTH         48UL
1156     u64             curr_chain_key;
1157     int             lockdep_depth;
1158     unsigned int            lockdep_recursion;
1159     struct held_lock        held_locks[MAX_LOCK_DEPTH];
1160 #endif
1161 
1162 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1163     unsigned int            in_ubsan;
1164 #endif
1165 
1166     /* Journalling filesystem info: */
1167     void                *journal_info;
1168 
1169     /* Stacked block device info: */
1170     struct bio_list         *bio_list;
1171 
1172     /* Stack plugging: */
1173     struct blk_plug         *plug;
1174 
1175     /* VM state: */
1176     struct reclaim_state        *reclaim_state;
1177 
1178     struct backing_dev_info     *backing_dev_info;
1179 
1180     struct io_context       *io_context;
1181 
1182 #ifdef CONFIG_COMPACTION
1183     struct capture_control      *capture_control;
1184 #endif
1185     /* Ptrace state: */
1186     unsigned long           ptrace_message;
1187     kernel_siginfo_t        *last_siginfo;
1188 
1189     struct task_io_accounting   ioac;
1190 #ifdef CONFIG_PSI
1191     /* Pressure stall state */
1192     unsigned int            psi_flags;
1193 #endif
1194 #ifdef CONFIG_TASK_XACCT
1195     /* Accumulated RSS usage: */
1196     u64             acct_rss_mem1;
1197     /* Accumulated virtual memory usage: */
1198     u64             acct_vm_mem1;
1199     /* stime + utime since last update: */
1200     u64             acct_timexpd;
1201 #endif
1202 #ifdef CONFIG_CPUSETS
1203     /* Protected by ->alloc_lock: */
1204     nodemask_t          mems_allowed;
1205     /* Sequence number to catch updates: */
1206     seqcount_spinlock_t     mems_allowed_seq;
1207     int             cpuset_mem_spread_rotor;
1208     int             cpuset_slab_spread_rotor;
1209 #endif
1210 #ifdef CONFIG_CGROUPS
1211     /* Control Group info protected by css_set_lock: */
1212     struct css_set __rcu        *cgroups;
1213     /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1214     struct list_head        cg_list;
1215 #endif
1216 #ifdef CONFIG_X86_CPU_RESCTRL
1217     u32             closid;
1218     u32             rmid;
1219 #endif
1220 #ifdef CONFIG_FUTEX
1221     struct robust_list_head __user  *robust_list;
1222 #ifdef CONFIG_COMPAT
1223     struct compat_robust_list_head __user *compat_robust_list;
1224 #endif
1225     struct list_head        pi_state_list;
1226     struct futex_pi_state       *pi_state_cache;
1227     struct mutex            futex_exit_mutex;
1228     unsigned int            futex_state;
1229 #endif
1230 #ifdef CONFIG_PERF_EVENTS
1231     struct perf_event_context   *perf_event_ctxp[perf_nr_task_contexts];
1232     struct mutex            perf_event_mutex;
1233     struct list_head        perf_event_list;
1234 #endif
1235 #ifdef CONFIG_DEBUG_PREEMPT
1236     unsigned long           preempt_disable_ip;
1237 #endif
1238 #ifdef CONFIG_NUMA
1239     /* Protected by alloc_lock: */
1240     struct mempolicy        *mempolicy;
1241     short               il_prev;
1242     short               pref_node_fork;
1243 #endif
1244 #ifdef CONFIG_NUMA_BALANCING
1245     int             numa_scan_seq;
1246     unsigned int            numa_scan_period;
1247     unsigned int            numa_scan_period_max;
1248     int             numa_preferred_nid;
1249     unsigned long           numa_migrate_retry;
1250     /* Migration stamp: */
1251     u64             node_stamp;
1252     u64             last_task_numa_placement;
1253     u64             last_sum_exec_runtime;
1254     struct callback_head        numa_work;
1255 
1256     /*
1257      * This pointer is only modified for current in syscall and
1258      * pagefault context (and for tasks being destroyed), so it can be read
1259      * from any of the following contexts:
1260      *  - RCU read-side critical section
1261      *  - current->numa_group from everywhere
1262      *  - task's runqueue locked, task not running
1263      */
1264     struct numa_group __rcu     *numa_group;
1265 
1266     /*
1267      * numa_faults is an array split into four regions:
1268      * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1269      * in this precise order.
1270      *
1271      * faults_memory: Exponential decaying average of faults on a per-node
1272      * basis. Scheduling placement decisions are made based on these
1273      * counts. The values remain static for the duration of a PTE scan.
1274      * faults_cpu: Track the nodes the process was running on when a NUMA
1275      * hinting fault was incurred.
1276      * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1277      * during the current scan window. When the scan completes, the counts
1278      * in faults_memory and faults_cpu decay and these values are copied.
1279      */
1280     unsigned long           *numa_faults;
1281     unsigned long           total_numa_faults;
1282 
1283     /*
1284      * numa_faults_locality tracks if faults recorded during the last
1285      * scan window were remote/local or failed to migrate. The task scan
1286      * period is adapted based on the locality of the faults with different
1287      * weights depending on whether they were shared or private faults
1288      */
1289     unsigned long           numa_faults_locality[3];
1290 
1291     unsigned long           numa_pages_migrated;
1292 #endif /* CONFIG_NUMA_BALANCING */
1293 
1294 #ifdef CONFIG_RSEQ
1295     struct rseq __user *rseq;
1296     u32 rseq_sig;
1297     /*
1298      * RmW on rseq_event_mask must be performed atomically
1299      * with respect to preemption.
1300      */
1301     unsigned long rseq_event_mask;
1302 #endif
1303 
1304     struct tlbflush_unmap_batch tlb_ubc;
1305 
1306     union {
1307         refcount_t      rcu_users;
1308         struct rcu_head     rcu;
1309     };
1310 
1311     /* Cache last used pipe for splice(): */
1312     struct pipe_inode_info      *splice_pipe;
1313 
1314     struct page_frag        task_frag;
1315 
1316 #ifdef CONFIG_TASK_DELAY_ACCT
1317     struct task_delay_info      *delays;
1318 #endif
1319 
1320 #ifdef CONFIG_FAULT_INJECTION
1321     int             make_it_fail;
1322     unsigned int            fail_nth;
1323 #endif
1324     /*
1325      * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1326      * balance_dirty_pages() for a dirty throttling pause:
1327      */
1328     int             nr_dirtied;
1329     int             nr_dirtied_pause;
1330     /* Start of a write-and-pause period: */
1331     unsigned long           dirty_paused_when;
1332 
1333 #ifdef CONFIG_LATENCYTOP
1334     int             latency_record_count;
1335     struct latency_record       latency_record[LT_SAVECOUNT];
1336 #endif
1337     /*
1338      * Time slack values; these are used to round up poll() and
1339      * select() etc timeout values. These are in nanoseconds.
1340      */
1341     u64             timer_slack_ns;
1342     u64             default_timer_slack_ns;
1343 
1344 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1345     unsigned int            kasan_depth;
1346 #endif
1347 
1348 #ifdef CONFIG_KCSAN
1349     struct kcsan_ctx        kcsan_ctx;
1350 #ifdef CONFIG_TRACE_IRQFLAGS
1351     struct irqtrace_events      kcsan_save_irqtrace;
1352 #endif
1353 #ifdef CONFIG_KCSAN_WEAK_MEMORY
1354     int             kcsan_stack_depth;
1355 #endif
1356 #endif
1357 
1358 #if IS_ENABLED(CONFIG_KUNIT)
1359     struct kunit            *kunit_test;
1360 #endif
1361 
1362 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1363     /* Index of current stored address in ret_stack: */
1364     int             curr_ret_stack;
1365     int             curr_ret_depth;
1366 
1367     /* Stack of return addresses for return function tracing: */
1368     struct ftrace_ret_stack     *ret_stack;
1369 
1370     /* Timestamp for last schedule: */
1371     unsigned long long      ftrace_timestamp;
1372 
1373     /*
1374      * Number of functions that haven't been traced
1375      * because of depth overrun:
1376      */
1377     atomic_t            trace_overrun;
1378 
1379     /* Pause tracing: */
1380     atomic_t            tracing_graph_pause;
1381 #endif
1382 
1383 #ifdef CONFIG_TRACING
1384     /* State flags for use by tracers: */
1385     unsigned long           trace;
1386 
1387     /* Bitmask and counter of trace recursion: */
1388     unsigned long           trace_recursion;
1389 #endif /* CONFIG_TRACING */
1390 
1391 #ifdef CONFIG_KCOV
1392     /* See kernel/kcov.c for more details. */
1393 
1394     /* Coverage collection mode enabled for this task (0 if disabled): */
1395     unsigned int            kcov_mode;
1396 
1397     /* Size of the kcov_area: */
1398     unsigned int            kcov_size;
1399 
1400     /* Buffer for coverage collection: */
1401     void                *kcov_area;
1402 
1403     /* KCOV descriptor wired with this task or NULL: */
1404     struct kcov         *kcov;
1405 
1406     /* KCOV common handle for remote coverage collection: */
1407     u64             kcov_handle;
1408 
1409     /* KCOV sequence number: */
1410     int             kcov_sequence;
1411 
1412     /* Collect coverage from softirq context: */
1413     unsigned int            kcov_softirq;
1414 #endif
1415 
1416 #ifdef CONFIG_MEMCG
1417     struct mem_cgroup       *memcg_in_oom;
1418     gfp_t               memcg_oom_gfp_mask;
1419     int             memcg_oom_order;
1420 
1421     /* Number of pages to reclaim on returning to userland: */
1422     unsigned int            memcg_nr_pages_over_high;
1423 
1424     /* Used by memcontrol for targeted memcg charge: */
1425     struct mem_cgroup       *active_memcg;
1426 #endif
1427 
1428 #ifdef CONFIG_BLK_CGROUP
1429     struct request_queue        *throttle_queue;
1430 #endif
1431 
1432 #ifdef CONFIG_UPROBES
1433     struct uprobe_task      *utask;
1434 #endif
1435 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1436     unsigned int            sequential_io;
1437     unsigned int            sequential_io_avg;
1438 #endif
1439     struct kmap_ctrl        kmap_ctrl;
1440 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1441     unsigned long           task_state_change;
1442 # ifdef CONFIG_PREEMPT_RT
1443     unsigned long           saved_state_change;
1444 # endif
1445 #endif
1446     int             pagefault_disabled;
1447 #ifdef CONFIG_MMU
1448     struct task_struct      *oom_reaper_list;
1449     struct timer_list       oom_reaper_timer;
1450 #endif
1451 #ifdef CONFIG_VMAP_STACK
1452     struct vm_struct        *stack_vm_area;
1453 #endif
1454 #ifdef CONFIG_THREAD_INFO_IN_TASK
1455     /* A live task holds one reference: */
1456     refcount_t          stack_refcount;
1457 #endif
1458 #ifdef CONFIG_LIVEPATCH
1459     int patch_state;
1460 #endif
1461 #ifdef CONFIG_SECURITY
1462     /* Used by LSM modules for access restriction: */
1463     void                *security;
1464 #endif
1465 #ifdef CONFIG_BPF_SYSCALL
1466     /* Used by BPF task local storage */
1467     struct bpf_local_storage __rcu  *bpf_storage;
1468     /* Used for BPF run context */
1469     struct bpf_run_ctx      *bpf_ctx;
1470 #endif
1471 
1472 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1473     unsigned long           lowest_stack;
1474     unsigned long           prev_lowest_stack;
1475 #endif
1476 
1477 #ifdef CONFIG_X86_MCE
1478     void __user         *mce_vaddr;
1479     __u64               mce_kflags;
1480     u64             mce_addr;
1481     __u64               mce_ripv : 1,
1482                     mce_whole_page : 1,
1483                     __mce_reserved : 62;
1484     struct callback_head        mce_kill_me;
1485     int             mce_count;
1486 #endif
1487 
1488 #ifdef CONFIG_KRETPROBES
1489     struct llist_head               kretprobe_instances;
1490 #endif
1491 #ifdef CONFIG_RETHOOK
1492     struct llist_head               rethooks;
1493 #endif
1494 
1495 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1496     /*
1497      * If L1D flush is supported on mm context switch
1498      * then we use this callback head to queue kill work
1499      * to kill tasks that are not running on SMT disabled
1500      * cores
1501      */
1502     struct callback_head        l1d_flush_kill;
1503 #endif
1504 
1505 #ifdef CONFIG_RV
1506     /*
1507      * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
1508      * If we find justification for more monitors, we can think
1509      * about adding more or developing a dynamic method. So far,
1510      * none of these are justified.
1511      */
1512     union rv_task_monitor       rv[RV_PER_TASK_MONITORS];
1513 #endif
1514 
1515     /*
1516      * New fields for task_struct should be added above here, so that
1517      * they are included in the randomized portion of task_struct.
1518      */
1519     randomized_struct_fields_end
1520 
1521     /* CPU-specific state of this task: */
1522     struct thread_struct        thread;
1523 
1524     /*
1525      * WARNING: on x86, 'thread_struct' contains a variable-sized
1526      * structure.  It *MUST* be at the end of 'task_struct'.
1527      *
1528      * Do not put anything below here!
1529      */
1530 };
1531 
1532 static inline struct pid *task_pid(struct task_struct *task)
1533 {
1534     return task->thread_pid;
1535 }
1536 
1537 /*
1538  * the helpers to get the task's different pids as they are seen
1539  * from various namespaces
1540  *
1541  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1542  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1543  *                     current.
1544  * task_xid_nr_ns()  : id seen from the ns specified;
1545  *
1546  * see also pid_nr() etc in include/linux/pid.h
1547  */
1548 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1549 
1550 static inline pid_t task_pid_nr(struct task_struct *tsk)
1551 {
1552     return tsk->pid;
1553 }
1554 
1555 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1556 {
1557     return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1558 }
1559 
1560 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1561 {
1562     return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1563 }
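
/*
 * A minimal illustrative sketch, not part of this header: how the _nr and
 * _vnr flavours differ for a caller reporting a task's PID. The function
 * name pid_report_example() is hypothetical; pr_info() is from
 * <linux/printk.h>.
 */
static inline void pid_report_example(struct task_struct *tsk)
{
	pid_t global = task_pid_nr(tsk);	/* id as seen from the init pid namespace */
	pid_t local  = task_pid_vnr(tsk);	/* id as seen from current's pid namespace */

	pr_info("task %d is pid %d in this namespace\n", global, local);
}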
1564 
1565 
1566 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1567 {
1568     return tsk->tgid;
1569 }
1570 
1571 /**
1572  * pid_alive - check that a task structure is not stale
1573  * @p: Task structure to be checked.
1574  *
1575  * Test if a process is not yet dead (at most zombie state).
1576  * If pid_alive fails, then pointers within the task structure
1577  * can be stale and must not be dereferenced.
1578  *
1579  * Return: 1 if the process is alive. 0 otherwise.
1580  */
1581 static inline int pid_alive(const struct task_struct *p)
1582 {
1583     return p->thread_pid != NULL;
1584 }
1585 
1586 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1587 {
1588     return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1589 }
1590 
1591 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1592 {
1593     return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1594 }
1595 
1596 
1597 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1598 {
1599     return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1600 }
1601 
1602 static inline pid_t task_session_vnr(struct task_struct *tsk)
1603 {
1604     return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1605 }
1606 
1607 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1608 {
1609     return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1610 }
1611 
1612 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1613 {
1614     return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1615 }
1616 
1617 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1618 {
1619     pid_t pid = 0;
1620 
1621     rcu_read_lock();
1622     if (pid_alive(tsk))
1623         pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1624     rcu_read_unlock();
1625 
1626     return pid;
1627 }
1628 
1629 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1630 {
1631     return task_ppid_nr_ns(tsk, &init_pid_ns);
1632 }
1633 
1634 /* Obsolete, do not use: */
1635 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1636 {
1637     return task_pgrp_nr_ns(tsk, &init_pid_ns);
1638 }
1639 
1640 #define TASK_REPORT_IDLE    (TASK_REPORT + 1)
1641 #define TASK_REPORT_MAX     (TASK_REPORT_IDLE << 1)
1642 
1643 static inline unsigned int __task_state_index(unsigned int tsk_state,
1644                           unsigned int tsk_exit_state)
1645 {
1646     unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
1647 
1648     BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1649 
1650     if (tsk_state == TASK_IDLE)
1651         state = TASK_REPORT_IDLE;
1652 
1653     /*
1654      * We're lying here, but rather than expose a completely new task state
1655      * to userspace, we can make this appear as if the task has gone through
1656      * a regular rt_mutex_lock() call.
1657      */
1658     if (tsk_state == TASK_RTLOCK_WAIT)
1659         state = TASK_UNINTERRUPTIBLE;
1660 
1661     return fls(state);
1662 }
1663 
1664 static inline unsigned int task_state_index(struct task_struct *tsk)
1665 {
1666     return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1667 }
1668 
1669 static inline char task_index_to_char(unsigned int state)
1670 {
1671     static const char state_char[] = "RSDTtXZPI";
1672 
1673     BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1674 
1675     return state_char[state];
1676 }
1677 
1678 static inline char task_state_to_char(struct task_struct *tsk)
1679 {
1680     return task_index_to_char(task_state_index(tsk));
1681 }
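
/*
 * A minimal illustrative sketch, not part of this header: __task_state_index()
 * collapses the state bitmask into a small index via fls(), and
 * task_index_to_char() maps that index into "RSDTtXZPI" - the same letters
 * reported by ps(1) and /proc/<pid>/stat. The function name
 * state_char_example() is hypothetical.
 */
static inline char state_char_example(struct task_struct *tsk)
{
	/* e.g. 'R' for TASK_RUNNING, 'S' for TASK_INTERRUPTIBLE, 'I' for TASK_IDLE */
	return task_state_to_char(tsk);
}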
1682 
1683 /**
1684  * is_global_init - check if a task structure is init. Since init
1685  * is free to have sub-threads, we need to check tgid.
1686  * @tsk: Task structure to be checked.
1687  *
1688  * Check if a task structure is the first user space task the kernel created.
1689  *
1690  * Return: 1 if the task structure is init. 0 otherwise.
1691  */
1692 static inline int is_global_init(struct task_struct *tsk)
1693 {
1694     return task_tgid_nr(tsk) == 1;
1695 }
1696 
1697 extern struct pid *cad_pid;
1698 
1699 /*
1700  * Per process flags
1701  */
1702 #define PF_VCPU         0x00000001  /* I'm a virtual CPU */
1703 #define PF_IDLE         0x00000002  /* I am an IDLE thread */
1704 #define PF_EXITING      0x00000004  /* Getting shut down */
1705 #define PF_POSTCOREDUMP     0x00000008  /* Coredumps should ignore this task */
1706 #define PF_IO_WORKER        0x00000010  /* Task is an IO worker */
1707 #define PF_WQ_WORKER        0x00000020  /* I'm a workqueue worker */
1708 #define PF_FORKNOEXEC       0x00000040  /* Forked but didn't exec */
1709 #define PF_MCE_PROCESS      0x00000080      /* Process policy on mce errors */
1710 #define PF_SUPERPRIV        0x00000100  /* Used super-user privileges */
1711 #define PF_DUMPCORE     0x00000200  /* Dumped core */
1712 #define PF_SIGNALED     0x00000400  /* Killed by a signal */
1713 #define PF_MEMALLOC     0x00000800  /* Allocating memory */
1714 #define PF_NPROC_EXCEEDED   0x00001000  /* set_user() noticed that RLIMIT_NPROC was exceeded */
1715 #define PF_USED_MATH        0x00002000  /* If unset the fpu must be initialized before use */
1716 #define PF_NOFREEZE     0x00008000  /* This thread should not be frozen */
1717 #define PF_FROZEN       0x00010000  /* Frozen for system suspend */
1718 #define PF_KSWAPD       0x00020000  /* I am kswapd */
1719 #define PF_MEMALLOC_NOFS    0x00040000  /* All allocation requests will inherit GFP_NOFS */
1720 #define PF_MEMALLOC_NOIO    0x00080000  /* All allocation requests will inherit GFP_NOIO */
1721 #define PF_LOCAL_THROTTLE   0x00100000  /* Throttle writes only against the bdi I write to;
1722                          * I am cleaning dirty pages from some other bdi. */
1723 #define PF_KTHREAD      0x00200000  /* I am a kernel thread */
1724 #define PF_RANDOMIZE        0x00400000  /* Randomize virtual address space */
1725 #define PF_NO_SETAFFINITY   0x04000000  /* Userland is not allowed to meddle with cpus_mask */
1726 #define PF_MCE_EARLY        0x08000000      /* Early kill for mce process policy */
1727 #define PF_MEMALLOC_PIN     0x10000000  /* Allocation context constrained to zones which allow long term pinning. */
1728 #define PF_FREEZER_SKIP     0x40000000  /* Freezer should not count it as freezable */
1729 #define PF_SUSPEND_TASK     0x80000000      /* This thread called freeze_processes() and should not be frozen */
1730 
1731 /*
1732  * Only the _current_ task can read/write to tsk->flags, but other
1733  * tasks can access tsk->flags in readonly mode for example
1734  * with tsk_used_math (like during threaded core dumping).
1735  * There is however an exception to this rule during ptrace
1736  * or during fork: the ptracer task is allowed to write to the
1737  * child->flags of its traced child (same goes for fork, the parent
1738  * can write to the child->flags), because we're guaranteed the
1739  * child is not running and in turn not changing child->flags
1740  * at the same time the parent does it.
1741  */
1742 #define clear_stopped_child_used_math(child)    do { (child)->flags &= ~PF_USED_MATH; } while (0)
1743 #define set_stopped_child_used_math(child)  do { (child)->flags |= PF_USED_MATH; } while (0)
1744 #define clear_used_math()           clear_stopped_child_used_math(current)
1745 #define set_used_math()             set_stopped_child_used_math(current)
1746 
1747 #define conditional_stopped_child_used_math(condition, child) \
1748     do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1749 
1750 #define conditional_used_math(condition)    conditional_stopped_child_used_math(condition, current)
1751 
1752 #define copy_to_stopped_child_used_math(child) \
1753     do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1754 
1755 /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
1756 #define tsk_used_math(p)            ((p)->flags & PF_USED_MATH)
1757 #define used_math()             tsk_used_math(current)
1758 
1759 static __always_inline bool is_percpu_thread(void)
1760 {
1761 #ifdef CONFIG_SMP
1762     return (current->flags & PF_NO_SETAFFINITY) &&
1763         (current->nr_cpus_allowed  == 1);
1764 #else
1765     return true;
1766 #endif
1767 }
1768 
1769 /* Per-process atomic flags. */
1770 #define PFA_NO_NEW_PRIVS        0   /* May not gain new privileges. */
1771 #define PFA_SPREAD_PAGE         1   /* Spread page cache over cpuset */
1772 #define PFA_SPREAD_SLAB         2   /* Spread some slab caches over cpuset */
1773 #define PFA_SPEC_SSB_DISABLE        3   /* Speculative Store Bypass disabled */
1774 #define PFA_SPEC_SSB_FORCE_DISABLE  4   /* Speculative Store Bypass force disabled*/
1775 #define PFA_SPEC_IB_DISABLE     5   /* Indirect branch speculation restricted */
1776 #define PFA_SPEC_IB_FORCE_DISABLE   6   /* Indirect branch speculation permanently restricted */
1777 #define PFA_SPEC_SSB_NOEXEC     7   /* Speculative Store Bypass clear on execve() */
1778 
1779 #define TASK_PFA_TEST(name, func)                   \
1780     static inline bool task_##func(struct task_struct *p)       \
1781     { return test_bit(PFA_##name, &p->atomic_flags); }
1782 
1783 #define TASK_PFA_SET(name, func)                    \
1784     static inline void task_set_##func(struct task_struct *p)   \
1785     { set_bit(PFA_##name, &p->atomic_flags); }
1786 
1787 #define TASK_PFA_CLEAR(name, func)                  \
1788     static inline void task_clear_##func(struct task_struct *p) \
1789     { clear_bit(PFA_##name, &p->atomic_flags); }
1790 
1791 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1792 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1793 
1794 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1795 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1796 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1797 
1798 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1799 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1800 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1801 
1802 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1803 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1804 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1805 
1806 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1807 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1808 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1809 
1810 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1811 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1812 
1813 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1814 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1815 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1816 
1817 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1818 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1819 
1820 static inline void
1821 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1822 {
1823     current->flags &= ~flags;
1824     current->flags |= orig_flags & flags;
1825 }
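
/*
 * A minimal illustrative sketch, not part of this header: the usual
 * save/modify/restore pattern for per-process flags, shown here with
 * PF_MEMALLOC_NOIO (compare memalloc_noio_save()/memalloc_noio_restore() in
 * <linux/sched/mm.h>). The function name noio_section_example() is
 * hypothetical.
 */
static inline void noio_section_example(void)
{
	unsigned long pflags = current->flags;		/* snapshot the original flags */

	current->flags |= PF_MEMALLOC_NOIO;		/* allocations must not recurse into I/O */
	/* ... allocate memory here ... */
	current_restore_flags(pflags, PF_MEMALLOC_NOIO);	/* put the old bit back */
}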
1826 
1827 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1828 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
1829 #ifdef CONFIG_SMP
1830 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1831 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1832 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1833 extern void release_user_cpus_ptr(struct task_struct *p);
1834 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1835 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1836 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1837 #else
1838 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1839 {
1840 }
1841 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1842 {
1843     if (!cpumask_test_cpu(0, new_mask))
1844         return -EINVAL;
1845     return 0;
1846 }
1847 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1848 {
1849     if (src->user_cpus_ptr)
1850         return -EINVAL;
1851     return 0;
1852 }
1853 static inline void release_user_cpus_ptr(struct task_struct *p)
1854 {
1855     WARN_ON(p->user_cpus_ptr);
1856 }
1857 
1858 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1859 {
1860     return 0;
1861 }
1862 #endif
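
/*
 * A minimal illustrative sketch, not part of this header: restricting a task
 * to a single CPU through the affinity interface above. The function name
 * pin_task_example() is hypothetical; cpumask_of() is from <linux/cpumask.h>.
 */
static inline int pin_task_example(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}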
1863 
1864 extern int yield_to(struct task_struct *p, bool preempt);
1865 extern void set_user_nice(struct task_struct *p, long nice);
1866 extern int task_prio(const struct task_struct *p);
1867 
1868 /**
1869  * task_nice - return the nice value of a given task.
1870  * @p: the task in question.
1871  *
1872  * Return: The nice value [ -20 ... 0 ... 19 ].
1873  */
1874 static inline int task_nice(const struct task_struct *p)
1875 {
1876     return PRIO_TO_NICE((p)->static_prio);
1877 }
1878 
1879 extern int can_nice(const struct task_struct *p, const int nice);
1880 extern int task_curr(const struct task_struct *p);
1881 extern int idle_cpu(int cpu);
1882 extern int available_idle_cpu(int cpu);
1883 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1884 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1885 extern void sched_set_fifo(struct task_struct *p);
1886 extern void sched_set_fifo_low(struct task_struct *p);
1887 extern void sched_set_normal(struct task_struct *p, int nice);
1888 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1889 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1890 extern struct task_struct *idle_task(int cpu);
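
/*
 * A minimal illustrative sketch, not part of this header: drivers that need a
 * latency-sensitive kernel thread typically use sched_set_fifo() rather than
 * picking an explicit RT priority. kthread_run() is from <linux/kthread.h>,
 * IS_ERR() from <linux/err.h>; the thread name "example-rt" and the function
 * name rt_worker_example() are hypothetical.
 */
static inline struct task_struct *rt_worker_example(int (*threadfn)(void *data))
{
	struct task_struct *tsk = kthread_run(threadfn, NULL, "example-rt");

	if (!IS_ERR(tsk))
		sched_set_fifo(tsk);	/* give it a default FIFO priority */
	return tsk;
}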
1891 
1892 /**
1893  * is_idle_task - is the specified task an idle task?
1894  * @p: the task in question.
1895  *
1896  * Return: 1 if @p is an idle task. 0 otherwise.
1897  */
1898 static __always_inline bool is_idle_task(const struct task_struct *p)
1899 {
1900     return !!(p->flags & PF_IDLE);
1901 }
1902 
1903 extern struct task_struct *curr_task(int cpu);
1904 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1905 
1906 void yield(void);
1907 
1908 union thread_union {
1909 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1910     struct task_struct task;
1911 #endif
1912 #ifndef CONFIG_THREAD_INFO_IN_TASK
1913     struct thread_info thread_info;
1914 #endif
1915     unsigned long stack[THREAD_SIZE/sizeof(long)];
1916 };
1917 
1918 #ifndef CONFIG_THREAD_INFO_IN_TASK
1919 extern struct thread_info init_thread_info;
1920 #endif
1921 
1922 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1923 
1924 #ifdef CONFIG_THREAD_INFO_IN_TASK
1925 # define task_thread_info(task) (&(task)->thread_info)
1926 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1927 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1928 #endif
1929 
1930 /*
1931  * find a task by one of its numerical ids
1932  *
1933  * find_task_by_pid_ns():
1934  *      finds a task by its pid in the specified namespace
1935  * find_task_by_vpid():
1936  *      finds a task by its virtual pid
1937  *
1938  * see also find_vpid() etc in include/linux/pid.h
1939  */
1940 
1941 extern struct task_struct *find_task_by_vpid(pid_t nr);
1942 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1943 
1944 /*
1945  * find a task by its virtual pid and get the task struct
1946  */
1947 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
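
/*
 * A minimal illustrative sketch, not part of this header: the usual lookup
 * pattern. find_task_by_vpid() must be called under rcu_read_lock(), and the
 * result has to be pinned with get_task_struct() (from <linux/sched/task.h>)
 * before the RCU read section ends - which is what find_get_task_by_vpid()
 * does for you. The function name lookup_example() is hypothetical.
 */
static inline struct task_struct *lookup_example(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);	/* caller must put_task_struct() when done */
	rcu_read_unlock();

	return p;
}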
1948 
1949 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1950 extern int wake_up_process(struct task_struct *tsk);
1951 extern void wake_up_new_task(struct task_struct *tsk);
1952 
1953 #ifdef CONFIG_SMP
1954 extern void kick_process(struct task_struct *tsk);
1955 #else
1956 static inline void kick_process(struct task_struct *tsk) { }
1957 #endif
1958 
1959 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1960 
1961 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1962 {
1963     __set_task_comm(tsk, from, false);
1964 }
1965 
1966 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1967 #define get_task_comm(buf, tsk) ({          \
1968     BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1969     __get_task_comm(buf, sizeof(buf), tsk);     \
1970 })
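
/*
 * A minimal illustrative sketch, not part of this header: get_task_comm()
 * insists on a buffer of exactly TASK_COMM_LEN bytes (enforced by the
 * BUILD_BUG_ON above). The function name comm_example() is hypothetical;
 * pr_info() is from <linux/printk.h>.
 */
static inline void comm_example(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("observed task: %s\n", comm);
}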
1971 
1972 #ifdef CONFIG_SMP
1973 static __always_inline void scheduler_ipi(void)
1974 {
1975     /*
1976      * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1977      * TIF_NEED_RESCHED remotely (for the first time) will also send
1978      * this IPI.
1979      */
1980     preempt_fold_need_resched();
1981 }
1982 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
1983 #else
1984 static inline void scheduler_ipi(void) { }
1985 static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
1986 {
1987     return 1;
1988 }
1989 #endif
1990 
1991 /*
1992  * Set thread flags in another task's structure.
1993  * See asm/thread_info.h for the available TIF_xxxx flags:
1994  */
1995 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1996 {
1997     set_ti_thread_flag(task_thread_info(tsk), flag);
1998 }
1999 
2000 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2001 {
2002     clear_ti_thread_flag(task_thread_info(tsk), flag);
2003 }
2004 
2005 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
2006                       bool value)
2007 {
2008     update_ti_thread_flag(task_thread_info(tsk), flag, value);
2009 }
2010 
2011 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2012 {
2013     return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2014 }
2015 
2016 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2017 {
2018     return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2019 }
2020 
2021 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2022 {
2023     return test_ti_thread_flag(task_thread_info(tsk), flag);
2024 }
2025 
2026 static inline void set_tsk_need_resched(struct task_struct *tsk)
2027 {
2028     set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2029 }
2030 
2031 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2032 {
2033     clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2034 }
2035 
2036 static inline int test_tsk_need_resched(struct task_struct *tsk)
2037 {
2038     return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2039 }
2040 
2041 /*
2042  * cond_resched() and cond_resched_lock(): latency reduction via
2043  * explicit rescheduling in places that are safe. The return
2044  * value indicates whether a reschedule was in fact done.
2045  * cond_resched_lock() drops the spinlock before scheduling and re-takes it.
2046  */
2047 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2048 extern int __cond_resched(void);
2049 
2050 #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
2051 
2052 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2053 
2054 static __always_inline int _cond_resched(void)
2055 {
2056     return static_call_mod(cond_resched)();
2057 }
2058 
2059 #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
2060 extern int dynamic_cond_resched(void);
2061 
2062 static __always_inline int _cond_resched(void)
2063 {
2064     return dynamic_cond_resched();
2065 }
2066 
2067 #else
2068 
2069 static inline int _cond_resched(void)
2070 {
2071     return __cond_resched();
2072 }
2073 
2074 #endif /* CONFIG_PREEMPT_DYNAMIC */
2075 
2076 #else
2077 
2078 static inline int _cond_resched(void) { return 0; }
2079 
2080 #endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
2081 
2082 #define cond_resched() ({           \
2083     __might_resched(__FILE__, __LINE__, 0); \
2084     _cond_resched();            \
2085 })
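
/*
 * A minimal illustrative sketch, not part of this header: breaking up a
 * long-running loop in process context. On fully preemptible kernels (without
 * CONFIG_PREEMPT_DYNAMIC) _cond_resched() above compiles to "return 0", since
 * preemption can already happen anywhere. The function name and iteration
 * count are hypothetical.
 */
static inline void long_loop_example(void)
{
	int i;

	for (i = 0; i < 1000000; i++) {
		/* ... one unit of work ... */
		cond_resched();		/* let other runnable tasks in */
	}
}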
2086 
2087 extern int __cond_resched_lock(spinlock_t *lock);
2088 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2089 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2090 
2091 #define MIGHT_RESCHED_RCU_SHIFT     8
2092 #define MIGHT_RESCHED_PREEMPT_MASK  ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2093 
2094 #ifndef CONFIG_PREEMPT_RT
2095 /*
2096  * Non-RT kernels have an elevated preempt count due to the held lock,
2097  * but are not allowed to be inside an RCU read-side critical section.
2098  */
2099 # define PREEMPT_LOCK_RESCHED_OFFSETS   PREEMPT_LOCK_OFFSET
2100 #else
2101 /*
2102  * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
2103  * cond_resched*lock() has to take that into account because it checks for
2104  * preempt_count() and rcu_preempt_depth().
2105  */
2106 # define PREEMPT_LOCK_RESCHED_OFFSETS   \
2107     (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
2108 #endif
2109 
2110 #define cond_resched_lock(lock) ({                      \
2111     __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);  \
2112     __cond_resched_lock(lock);                      \
2113 })
2114 
2115 #define cond_resched_rwlock_read(lock) ({                   \
2116     __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);  \
2117     __cond_resched_rwlock_read(lock);                   \
2118 })
2119 
2120 #define cond_resched_rwlock_write(lock) ({                  \
2121     __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);  \
2122     __cond_resched_rwlock_write(lock);                  \
2123 })
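
/*
 * A minimal illustrative sketch, not part of this header: draining work under
 * a spinlock without hogging the CPU. cond_resched_lock() may drop the lock
 * (when a reschedule is due or the lock is contended) and re-take it before
 * returning, so anything derived from the protected data must be re-read
 * afterwards. The function name drain_example() is hypothetical.
 */
static inline void drain_example(spinlock_t *lock, unsigned long *pending)
{
	spin_lock(lock);
	while (*pending > 0) {
		(*pending)--;			/* one unit of work under the lock */
		cond_resched_lock(lock);	/* loop condition is re-checked after this */
	}
	spin_unlock(lock);
}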
2124 
2125 static inline void cond_resched_rcu(void)
2126 {
2127 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2128     rcu_read_unlock();
2129     cond_resched();
2130     rcu_read_lock();
2131 #endif
2132 }
2133 
2134 #ifdef CONFIG_PREEMPT_DYNAMIC
2135 
2136 extern bool preempt_model_none(void);
2137 extern bool preempt_model_voluntary(void);
2138 extern bool preempt_model_full(void);
2139 
2140 #else
2141 
2142 static inline bool preempt_model_none(void)
2143 {
2144     return IS_ENABLED(CONFIG_PREEMPT_NONE);
2145 }
2146 static inline bool preempt_model_voluntary(void)
2147 {
2148     return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
2149 }
2150 static inline bool preempt_model_full(void)
2151 {
2152     return IS_ENABLED(CONFIG_PREEMPT);
2153 }
2154 
2155 #endif
2156 
2157 static inline bool preempt_model_rt(void)
2158 {
2159     return IS_ENABLED(CONFIG_PREEMPT_RT);
2160 }
2161 
2162 /*
2163  * Does the preemption model allow non-cooperative preemption?
2164  *
2165  * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
2166  * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
2167  * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
2168  * PREEMPT_NONE model.
2169  */
2170 static inline bool preempt_model_preemptible(void)
2171 {
2172     return preempt_model_full() || preempt_model_rt();
2173 }
2174 
2175 /*
2176  * Does a critical section need to be broken due to another
2177  * task waiting? (Technically this does not depend on CONFIG_PREEMPTION,
2178  * but reflects a general need for low latency.)
2179  */
2180 static inline int spin_needbreak(spinlock_t *lock)
2181 {
2182 #ifdef CONFIG_PREEMPTION
2183     return spin_is_contended(lock);
2184 #else
2185     return 0;
2186 #endif
2187 }
2188 
2189 /*
2190  * Check if a rwlock is contended.
2191  * Returns non-zero if there is another task waiting on the rwlock.
2192  * Returns zero if the lock is not contended or the system / underlying
2193  * rwlock implementation does not support contention detection.
2194  * Technically this does not depend on CONFIG_PREEMPTION, but reflects a
2195  * general need for low latency.
2196  */
2197 static inline int rwlock_needbreak(rwlock_t *lock)
2198 {
2199 #ifdef CONFIG_PREEMPTION
2200     return rwlock_is_contended(lock);
2201 #else
2202     return 0;
2203 #endif
2204 }
2205 
2206 static __always_inline bool need_resched(void)
2207 {
2208     return unlikely(tif_need_resched());
2209 }
2210 
2211 /*
2212  * Wrappers for p->thread_info->cpu access. No-op on UP.
2213  */
2214 #ifdef CONFIG_SMP
2215 
2216 static inline unsigned int task_cpu(const struct task_struct *p)
2217 {
2218     return READ_ONCE(task_thread_info(p)->cpu);
2219 }
2220 
2221 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2222 
2223 #else
2224 
2225 static inline unsigned int task_cpu(const struct task_struct *p)
2226 {
2227     return 0;
2228 }
2229 
2230 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2231 {
2232 }
2233 
2234 #endif /* CONFIG_SMP */
2235 
2236 extern bool sched_task_on_rq(struct task_struct *p);
2237 extern unsigned long get_wchan(struct task_struct *p);
2238 extern struct task_struct *cpu_curr_snapshot(int cpu);
2239 
2240 /*
2241  * In order to reduce various lock holder preemption latencies, provide an
2242  * interface to see if a vCPU is currently running or not.
2243  *
2244  * This allows us to terminate optimistic spin loops and block, analogous to
2245  * the native optimistic spin heuristic of testing if the lock owner task is
2246  * running or not.
2247  */
2248 #ifndef vcpu_is_preempted
2249 static inline bool vcpu_is_preempted(int cpu)
2250 {
2251     return false;
2252 }
2253 #endif
2254 
2255 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2256 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2257 
2258 #ifndef TASK_SIZE_OF
2259 #define TASK_SIZE_OF(tsk)   TASK_SIZE
2260 #endif
2261 
2262 #ifdef CONFIG_SMP
2263 static inline bool owner_on_cpu(struct task_struct *owner)
2264 {
2265     /*
2266      * Due to the lock holder preemption issue, skip spinning if the
2267      * task is not on a CPU or its CPU is preempted.
2268      */
2269     return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2270 }
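
/*
 * A minimal illustrative sketch, not part of this header: the kind of
 * optimistic spin loop used by mutexes and rwsems, terminated once the lock
 * owner stops running or our own reschedule is due. struct example_lock and
 * spin_on_owner_example() are hypothetical; cpu_relax() is from
 * <asm/processor.h>.
 */
struct example_lock {
	struct task_struct	*owner;
};

static inline bool spin_on_owner_example(struct example_lock *lock)
{
	struct task_struct *owner;
	bool keep_spinning = true;

	rcu_read_lock();	/* keeps the owner's task_struct from being freed */
	owner = READ_ONCE(lock->owner);
	while (owner && READ_ONCE(lock->owner) == owner) {
		if (!owner_on_cpu(owner) || need_resched()) {
			keep_spinning = false;	/* give up and block instead */
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();

	return keep_spinning;
}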
2271 
2272 /* Returns effective CPU energy utilization, as seen by the scheduler */
2273 unsigned long sched_cpu_util(int cpu);
2274 #endif /* CONFIG_SMP */
2275 
2276 #ifdef CONFIG_RSEQ
2277 
2278 /*
2279  * Map the event mask onto the user-space ABI enum rseq_cs_flags
2280  * for direct mask checks.
2281  */
2282 enum rseq_event_mask_bits {
2283     RSEQ_EVENT_PREEMPT_BIT  = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2284     RSEQ_EVENT_SIGNAL_BIT   = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2285     RSEQ_EVENT_MIGRATE_BIT  = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2286 };
2287 
2288 enum rseq_event_mask {
2289     RSEQ_EVENT_PREEMPT  = (1U << RSEQ_EVENT_PREEMPT_BIT),
2290     RSEQ_EVENT_SIGNAL   = (1U << RSEQ_EVENT_SIGNAL_BIT),
2291     RSEQ_EVENT_MIGRATE  = (1U << RSEQ_EVENT_MIGRATE_BIT),
2292 };
2293 
2294 static inline void rseq_set_notify_resume(struct task_struct *t)
2295 {
2296     if (t->rseq)
2297         set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2298 }
2299 
2300 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2301 
2302 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2303                          struct pt_regs *regs)
2304 {
2305     if (current->rseq)
2306         __rseq_handle_notify_resume(ksig, regs);
2307 }
2308 
2309 static inline void rseq_signal_deliver(struct ksignal *ksig,
2310                        struct pt_regs *regs)
2311 {
2312     preempt_disable();
2313     __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2314     preempt_enable();
2315     rseq_handle_notify_resume(ksig, regs);
2316 }
2317 
2318 /* rseq_preempt() requires preemption to be disabled. */
2319 static inline void rseq_preempt(struct task_struct *t)
2320 {
2321     __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2322     rseq_set_notify_resume(t);
2323 }
2324 
2325 /* rseq_migrate() requires preemption to be disabled. */
2326 static inline void rseq_migrate(struct task_struct *t)
2327 {
2328     __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2329     rseq_set_notify_resume(t);
2330 }
2331 
2332 /*
2333  * If the parent process has a registered restartable sequences area, the
2334  * child inherits it. Unregister rseq for a clone with CLONE_VM set.
2335  */
2336 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2337 {
2338     if (clone_flags & CLONE_VM) {
2339         t->rseq = NULL;
2340         t->rseq_sig = 0;
2341         t->rseq_event_mask = 0;
2342     } else {
2343         t->rseq = current->rseq;
2344         t->rseq_sig = current->rseq_sig;
2345         t->rseq_event_mask = current->rseq_event_mask;
2346     }
2347 }
2348 
2349 static inline void rseq_execve(struct task_struct *t)
2350 {
2351     t->rseq = NULL;
2352     t->rseq_sig = 0;
2353     t->rseq_event_mask = 0;
2354 }
2355 
2356 #else
2357 
2358 static inline void rseq_set_notify_resume(struct task_struct *t)
2359 {
2360 }
2361 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2362                          struct pt_regs *regs)
2363 {
2364 }
2365 static inline void rseq_signal_deliver(struct ksignal *ksig,
2366                        struct pt_regs *regs)
2367 {
2368 }
2369 static inline void rseq_preempt(struct task_struct *t)
2370 {
2371 }
2372 static inline void rseq_migrate(struct task_struct *t)
2373 {
2374 }
2375 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2376 {
2377 }
2378 static inline void rseq_execve(struct task_struct *t)
2379 {
2380 }
2381 
2382 #endif
2383 
2384 #ifdef CONFIG_DEBUG_RSEQ
2385 
2386 void rseq_syscall(struct pt_regs *regs);
2387 
2388 #else
2389 
2390 static inline void rseq_syscall(struct pt_regs *regs)
2391 {
2392 }
2393 
2394 #endif
2395 
2396 #ifdef CONFIG_SCHED_CORE
2397 extern void sched_core_free(struct task_struct *tsk);
2398 extern void sched_core_fork(struct task_struct *p);
2399 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2400                 unsigned long uaddr);
2401 #else
2402 static inline void sched_core_free(struct task_struct *tsk) { }
2403 static inline void sched_core_fork(struct task_struct *p) { }
2404 #endif
2405 
2406 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2407 
2408 #endif