0001
0002 #ifndef _LINUX_SCHED_H
0003 #define _LINUX_SCHED_H
0004
0005
0006
0007
0008
0009
0010 #include <uapi/linux/sched.h>
0011
0012 #include <asm/current.h>
0013
0014 #include <linux/pid.h>
0015 #include <linux/sem.h>
0016 #include <linux/shm.h>
0017 #include <linux/mutex.h>
0018 #include <linux/plist.h>
0019 #include <linux/hrtimer.h>
0020 #include <linux/irqflags.h>
0021 #include <linux/seccomp.h>
0022 #include <linux/nodemask.h>
0023 #include <linux/rcupdate.h>
0024 #include <linux/refcount.h>
0025 #include <linux/resource.h>
0026 #include <linux/latencytop.h>
0027 #include <linux/sched/prio.h>
0028 #include <linux/sched/types.h>
0029 #include <linux/signal_types.h>
0030 #include <linux/syscall_user_dispatch.h>
0031 #include <linux/mm_types_task.h>
0032 #include <linux/task_io_accounting.h>
0033 #include <linux/posix-timers.h>
0034 #include <linux/rseq.h>
0035 #include <linux/seqlock.h>
0036 #include <linux/kcsan.h>
0037 #include <linux/rv.h>
0038 #include <asm/kmap_size.h>
0039
0040
0041 struct audit_context;
0042 struct backing_dev_info;
0043 struct bio_list;
0044 struct blk_plug;
0045 struct bpf_local_storage;
0046 struct bpf_run_ctx;
0047 struct capture_control;
0048 struct cfs_rq;
0049 struct fs_struct;
0050 struct futex_pi_state;
0051 struct io_context;
0052 struct io_uring_task;
0053 struct mempolicy;
0054 struct nameidata;
0055 struct nsproxy;
0056 struct perf_event_context;
0057 struct pid_namespace;
0058 struct pipe_inode_info;
0059 struct rcu_node;
0060 struct reclaim_state;
0061 struct robust_list_head;
0062 struct root_domain;
0063 struct rq;
0064 struct sched_attr;
0065 struct sched_param;
0066 struct seq_file;
0067 struct sighand_struct;
0068 struct signal_struct;
0069 struct task_delay_info;
0070 struct task_group;
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
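/* Bits used in tsk->__state: */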
0084 #define TASK_RUNNING 0x0000
0085 #define TASK_INTERRUPTIBLE 0x0001
0086 #define TASK_UNINTERRUPTIBLE 0x0002
0087 #define __TASK_STOPPED 0x0004
0088 #define __TASK_TRACED 0x0008
0089
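/* Bits used in tsk->exit_state: */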
0090 #define EXIT_DEAD 0x0010
0091 #define EXIT_ZOMBIE 0x0020
0092 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
0093
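/* Bits used in tsk->__state again: */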
0094 #define TASK_PARKED 0x0040
0095 #define TASK_DEAD 0x0080
0096 #define TASK_WAKEKILL 0x0100
0097 #define TASK_WAKING 0x0200
0098 #define TASK_NOLOAD 0x0400
0099 #define TASK_NEW 0x0800
0100
0101 #define TASK_RTLOCK_WAIT 0x1000
0102 #define TASK_STATE_MAX 0x2000
0103
0104
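/* Convenience macros for the sake of set_current_state(): */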
0105 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
0106 #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
0107 #define TASK_TRACED __TASK_TRACED
0108
0109 #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
0110
0111
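/* Convenience macros for the sake of wake_up(): */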
0112 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
0113
0114
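/* Set of states reported to user space (e.g. via the /proc state field): */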
0115 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
0116 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
0117 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
0118 TASK_PARKED)
0119
0120 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
0121
0122 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
0123 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
0124 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
0125
0126
0127
0128
0129
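/*
 * "Special" states (__TASK_STOPPED, __TASK_TRACED, TASK_PARKED, TASK_DEAD)
 * can be acted upon by other contexts (ptrace, signal delivery) and must
 * therefore be written under ->pi_lock via set_special_state(), never via
 * set_current_state().
 */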
0130 #define is_special_task_state(state) \
0131 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
0132
0133 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
0134 # define debug_normal_state_change(state_value) \
0135 do { \
0136 WARN_ON_ONCE(is_special_task_state(state_value)); \
0137 current->task_state_change = _THIS_IP_; \
0138 } while (0)
0139
0140 # define debug_special_state_change(state_value) \
0141 do { \
0142 WARN_ON_ONCE(!is_special_task_state(state_value)); \
0143 current->task_state_change = _THIS_IP_; \
0144 } while (0)
0145
0146 # define debug_rtlock_wait_set_state() \
0147 do { \
0148 current->saved_state_change = current->task_state_change;\
0149 current->task_state_change = _THIS_IP_; \
0150 } while (0)
0151
0152 # define debug_rtlock_wait_restore_state() \
0153 do { \
0154 current->task_state_change = current->saved_state_change;\
0155 } while (0)
0156
0157 #else
0158 # define debug_normal_state_change(cond) do { } while (0)
0159 # define debug_special_state_change(cond) do { } while (0)
0160 # define debug_rtlock_wait_set_state() do { } while (0)
0161 # define debug_rtlock_wait_restore_state() do { } while (0)
0162 #endif
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
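/*
 * set_current_state() includes a full memory barrier (smp_store_mb()) so
 * that the state write is ordered against a subsequent test of the wakeup
 * condition, pairing with the barrier implied on the waker side.  The
 * canonical sleep pattern therefore looks like:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * where CONDITION is set before the waker calls wake_up_process().
 * __set_current_state() omits the barrier and is only safe when the
 * required ordering is provided by other means or no wakeup race exists.
 */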
0201 #define __set_current_state(state_value) \
0202 do { \
0203 debug_normal_state_change((state_value)); \
0204 WRITE_ONCE(current->__state, (state_value)); \
0205 } while (0)
0206
0207 #define set_current_state(state_value) \
0208 do { \
0209 debug_normal_state_change((state_value)); \
0210 smp_store_mb(current->__state, (state_value)); \
0211 } while (0)
0212
0213
0214
0215
0216
0217
0218
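/*
 * set_special_state() serializes the state write against concurrent
 * modifiers (e.g. ptrace attach/detach, signal_wake_up()) by taking
 * ->pi_lock; use it for the states covered by is_special_task_state().
 */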
0219 #define set_special_state(state_value) \
0220 do { \
0221 unsigned long flags; \
0222 \
0223 raw_spin_lock_irqsave(&current->pi_lock, flags); \
0224 debug_special_state_change((state_value)); \
0225 WRITE_ONCE(current->__state, (state_value)); \
0226 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
0227 } while (0)
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
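/*
 * PREEMPT_RT: while a task blocks on an rtmutex-based spinlock, its real
 * sleep state is parked in ->saved_state and ->__state is set to
 * TASK_RTLOCK_WAIT; the saved state is restored once the lock wait ends.
 */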
0254 #define current_save_and_set_rtlock_wait_state() \
0255 do { \
0256 lockdep_assert_irqs_disabled(); \
0257 raw_spin_lock(&current->pi_lock); \
0258 current->saved_state = current->__state; \
0259 debug_rtlock_wait_set_state(); \
0260 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
0261 raw_spin_unlock(&current->pi_lock); \
0262 } while (0);
0263
0264 #define current_restore_rtlock_saved_state() \
0265 do { \
0266 lockdep_assert_irqs_disabled(); \
0267 raw_spin_lock(&current->pi_lock); \
0268 debug_rtlock_wait_restore_state(); \
0269 WRITE_ONCE(current->__state, current->saved_state); \
0270 current->saved_state = TASK_RUNNING; \
0271 raw_spin_unlock(&current->pi_lock); \
0272 } while (0);
0273
0274 #define get_current_state() READ_ONCE(current->__state)
0275
0276
0277
0278
0279
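/*
 * Task command name length, defined as an enum (rather than a #define)
 * so the value is visible to BPF programs via BTF.
 */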
0280 enum {
0281 TASK_COMM_LEN = 16,
0282 };
0283
0284 extern void scheduler_tick(void);
0285
0286 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
0287
0288 extern long schedule_timeout(long timeout);
0289 extern long schedule_timeout_interruptible(long timeout);
0290 extern long schedule_timeout_killable(long timeout);
0291 extern long schedule_timeout_uninterruptible(long timeout);
0292 extern long schedule_timeout_idle(long timeout);
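/*
 * schedule_timeout() sleeps in the previously set task state for up to
 * the requested number of jiffies (MAX_SCHEDULE_TIMEOUT means "no
 * timeout") and returns the jiffies remaining; the _interruptible,
 * _killable, _uninterruptible and _idle variants set the corresponding
 * task state themselves before sleeping.
 */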
0293 asmlinkage void schedule(void);
0294 extern void schedule_preempt_disabled(void);
0295 asmlinkage void preempt_schedule_irq(void);
0296 #ifdef CONFIG_PREEMPT_RT
0297 extern void schedule_rtlock(void);
0298 #endif
0299
0300 extern int __must_check io_schedule_prepare(void);
0301 extern void io_schedule_finish(int token);
0302 extern long io_schedule_timeout(long timeout);
0303 extern void io_schedule(void);
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
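/*
 * struct prev_cputime - snapshot of the previously reported utime/stime,
 * kept so that the cputime values handed to user space stay monotonic.
 */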
0314 struct prev_cputime {
0315 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
0316 u64 utime;
0317 u64 stime;
0318 raw_spinlock_t lock;
0319 #endif
0320 };
0321
0322 enum vtime_state {
0323
0324 VTIME_INACTIVE = 0,
0325
0326 VTIME_IDLE,
0327
0328 VTIME_SYS,
0329
0330 VTIME_USER,
0331
0332 VTIME_GUEST,
0333 };
0334
0335 struct vtime {
0336 seqcount_t seqcount;
0337 unsigned long long starttime;
0338 enum vtime_state state;
0339 unsigned int cpu;
0340 u64 utime;
0341 u64 stime;
0342 u64 gtime;
0343 };
0344
0345
0346
0347
0348
0349
0350
0351 enum uclamp_id {
0352 UCLAMP_MIN = 0,
0353 UCLAMP_MAX,
0354 UCLAMP_CNT
0355 };
0356
0357 #ifdef CONFIG_SMP
0358 extern struct root_domain def_root_domain;
0359 extern struct mutex sched_domains_mutex;
0360 #endif
0361
0362 struct sched_info {
0363 #ifdef CONFIG_SCHED_INFO
0364
0365
0366
0367 unsigned long pcount;
0368
0369
0370 unsigned long long run_delay;
0371
0372
0373
0374
0375 unsigned long long last_arrival;
0376
0377
0378 unsigned long long last_queued;
0379
0380 #endif
0381 };
0382
0383
0384
0385
0386
0387
0388
0389
0390 # define SCHED_FIXEDPOINT_SHIFT 10
0391 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
0392
0393
0394 # define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
0395 # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
0396
0397 struct load_weight {
0398 unsigned long weight;
0399 u32 inv_weight;
0400 };
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
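/*
 * struct util_est - estimated CPU utilization of a task/runqueue:
 * @enqueued: snapshot of the utilization estimate taken at enqueue time
 * @ewma:     exponentially weighted moving average of past (dequeue-time)
 *            utilization, weighted by 1/2^UTIL_EST_WEIGHT_SHIFT
 * UTIL_AVG_UNCHANGED (the MSB of @enqueued) marks that util_avg has not
 * changed since the last util_est update, so redundant updates can be
 * skipped.
 */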
0431 struct util_est {
0432 unsigned int enqueued;
0433 unsigned int ewma;
0434 #define UTIL_EST_WEIGHT_SHIFT 2
0435 #define UTIL_AVG_UNCHANGED 0x80000000
0436 } __attribute__((__aligned__(sizeof(u64))));
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
0481
0482
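/*
 * struct sched_avg - PELT (per-entity load tracking) signals: geometrically
 * decayed sums and the derived averages of load, runnable time and CPU
 * utilization; see kernel/sched/pelt.c for the update rules.
 */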
0483 struct sched_avg {
0484 u64 last_update_time;
0485 u64 load_sum;
0486 u64 runnable_sum;
0487 u32 util_sum;
0488 u32 period_contrib;
0489 unsigned long load_avg;
0490 unsigned long runnable_avg;
0491 unsigned long util_avg;
0492 struct util_est util_est;
0493 } ____cacheline_aligned;
0494
0495 struct sched_statistics {
0496 #ifdef CONFIG_SCHEDSTATS
0497 u64 wait_start;
0498 u64 wait_max;
0499 u64 wait_count;
0500 u64 wait_sum;
0501 u64 iowait_count;
0502 u64 iowait_sum;
0503
0504 u64 sleep_start;
0505 u64 sleep_max;
0506 s64 sum_sleep_runtime;
0507
0508 u64 block_start;
0509 u64 block_max;
0510 s64 sum_block_runtime;
0511
0512 u64 exec_max;
0513 u64 slice_max;
0514
0515 u64 nr_migrations_cold;
0516 u64 nr_failed_migrations_affine;
0517 u64 nr_failed_migrations_running;
0518 u64 nr_failed_migrations_hot;
0519 u64 nr_forced_migrations;
0520
0521 u64 nr_wakeups;
0522 u64 nr_wakeups_sync;
0523 u64 nr_wakeups_migrate;
0524 u64 nr_wakeups_local;
0525 u64 nr_wakeups_remote;
0526 u64 nr_wakeups_affine;
0527 u64 nr_wakeups_affine_attempts;
0528 u64 nr_wakeups_passive;
0529 u64 nr_wakeups_idle;
0530
0531 #ifdef CONFIG_SCHED_CORE
0532 u64 core_forceidle_sum;
0533 #endif
0534 #endif
0535 } ____cacheline_aligned;
0536
0537 struct sched_entity {
0538
0539 struct load_weight load;
0540 struct rb_node run_node;
0541 struct list_head group_node;
0542 unsigned int on_rq;
0543
0544 u64 exec_start;
0545 u64 sum_exec_runtime;
0546 u64 vruntime;
0547 u64 prev_sum_exec_runtime;
0548
0549 u64 nr_migrations;
0550
0551 #ifdef CONFIG_FAIR_GROUP_SCHED
0552 int depth;
0553 struct sched_entity *parent;
0554
0555 struct cfs_rq *cfs_rq;
0556
0557 struct cfs_rq *my_q;
0558
0559 unsigned long runnable_weight;
0560 #endif
0561
0562 #ifdef CONFIG_SMP
0563
0564
0565
0566
0567
0568
0569 struct sched_avg avg;
0570 #endif
0571 };
0572
0573 struct sched_rt_entity {
0574 struct list_head run_list;
0575 unsigned long timeout;
0576 unsigned long watchdog_stamp;
0577 unsigned int time_slice;
0578 unsigned short on_rq;
0579 unsigned short on_list;
0580
0581 struct sched_rt_entity *back;
0582 #ifdef CONFIG_RT_GROUP_SCHED
0583 struct sched_rt_entity *parent;
0584
0585 struct rt_rq *rt_rq;
0586
0587 struct rt_rq *my_q;
0588 #endif
0589 } __randomize_layout;
0590
0591 struct sched_dl_entity {
0592 struct rb_node rb_node;
0593
0594
0595
0596
0597
0598
0599 u64 dl_runtime;
0600 u64 dl_deadline;
0601 u64 dl_period;
0602 u64 dl_bw;
0603 u64 dl_density;
0604
0605
0606
0607
0608
0609
0610 s64 runtime;
0611 u64 deadline;
0612 unsigned int flags;
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634 unsigned int dl_throttled : 1;
0635 unsigned int dl_yielded : 1;
0636 unsigned int dl_non_contending : 1;
0637 unsigned int dl_overrun : 1;
0638
0639
0640
0641
0642
0643 struct hrtimer dl_timer;
0644
0645
0646
0647
0648
0649
0650
0651
0652 struct hrtimer inactive_timer;
0653
0654 #ifdef CONFIG_RT_MUTEXES
0655
0656
0657
0658
0659
0660 struct sched_dl_entity *pi_se;
0661 #endif
0662 };
0663
0664 #ifdef CONFIG_UCLAMP_TASK
0665
0666 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
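/*
 * struct uclamp_se - utilization clamp for a scheduling entity: the
 * requested clamp value, the clamp bucket it maps to, whether the clamp
 * is currently active on a runqueue, and whether it was explicitly set by
 * user space rather than inherited from the system defaults.
 */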
0691 struct uclamp_se {
0692 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
0693 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
0694 unsigned int active : 1;
0695 unsigned int user_defined : 1;
0696 };
0697 #endif
0698
0699 union rcu_special {
0700 struct {
0701 u8 blocked;
0702 u8 need_qs;
0703 u8 exp_hint;
0704 u8 need_mb;
0705 } b;
0706 u32 s;
0707 };
0708
0709 enum perf_event_task_context {
0710 perf_invalid_context = -1,
0711 perf_hw_context = 0,
0712 perf_sw_context,
0713 perf_nr_task_contexts,
0714 };
0715
0716 struct wake_q_node {
0717 struct wake_q_node *next;
0718 };
0719
0720 struct kmap_ctrl {
0721 #ifdef CONFIG_KMAP_LOCAL
0722 int idx;
0723 pte_t pteval[KM_MAX_IDX];
0724 #endif
0725 };
0726
0727 struct task_struct {
0728 #ifdef CONFIG_THREAD_INFO_IN_TASK
0729
0730
0731
0732
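/*
 * For reasons of header soup (see current_thread_info()), this
 * must be the first element of task_struct.
 */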
0733 struct thread_info thread_info;
0734 #endif
0735 unsigned int __state;
0736
0737 #ifdef CONFIG_PREEMPT_RT
0738
0739 unsigned int saved_state;
0740 #endif
0741
0742
0743
0744
0745
0746 randomized_struct_fields_start
0747
0748 void *stack;
0749 refcount_t usage;
0750
0751 unsigned int flags;
0752 unsigned int ptrace;
0753
0754 #ifdef CONFIG_SMP
0755 int on_cpu;
0756 struct __call_single_node wake_entry;
0757 unsigned int wakee_flips;
0758 unsigned long wakee_flip_decay_ts;
0759 struct task_struct *last_wakee;
0760
0761
0762
0763
0764
0765
0766
0767
0768 int recent_used_cpu;
0769 int wake_cpu;
0770 #endif
0771 int on_rq;
0772
0773 int prio;
0774 int static_prio;
0775 int normal_prio;
0776 unsigned int rt_priority;
0777
0778 struct sched_entity se;
0779 struct sched_rt_entity rt;
0780 struct sched_dl_entity dl;
0781 const struct sched_class *sched_class;
0782
0783 #ifdef CONFIG_SCHED_CORE
0784 struct rb_node core_node;
0785 unsigned long core_cookie;
0786 unsigned int core_occupation;
0787 #endif
0788
0789 #ifdef CONFIG_CGROUP_SCHED
0790 struct task_group *sched_task_group;
0791 #endif
0792
0793 #ifdef CONFIG_UCLAMP_TASK
0794
0795
0796
0797
0798 struct uclamp_se uclamp_req[UCLAMP_CNT];
0799
0800
0801
0802
0803 struct uclamp_se uclamp[UCLAMP_CNT];
0804 #endif
0805
0806 struct sched_statistics stats;
0807
0808 #ifdef CONFIG_PREEMPT_NOTIFIERS
0809
0810 struct hlist_head preempt_notifiers;
0811 #endif
0812
0813 #ifdef CONFIG_BLK_DEV_IO_TRACE
0814 unsigned int btrace_seq;
0815 #endif
0816
0817 unsigned int policy;
0818 int nr_cpus_allowed;
0819 const cpumask_t *cpus_ptr;
0820 cpumask_t *user_cpus_ptr;
0821 cpumask_t cpus_mask;
0822 void *migration_pending;
0823 #ifdef CONFIG_SMP
0824 unsigned short migration_disabled;
0825 #endif
0826 unsigned short migration_flags;
0827
0828 #ifdef CONFIG_PREEMPT_RCU
0829 int rcu_read_lock_nesting;
0830 union rcu_special rcu_read_unlock_special;
0831 struct list_head rcu_node_entry;
0832 struct rcu_node *rcu_blocked_node;
0833 #endif
0834
0835 #ifdef CONFIG_TASKS_RCU
0836 unsigned long rcu_tasks_nvcsw;
0837 u8 rcu_tasks_holdout;
0838 u8 rcu_tasks_idx;
0839 int rcu_tasks_idle_cpu;
0840 struct list_head rcu_tasks_holdout_list;
0841 #endif
0842
0843 #ifdef CONFIG_TASKS_TRACE_RCU
0844 int trc_reader_nesting;
0845 int trc_ipi_to_cpu;
0846 union rcu_special trc_reader_special;
0847 struct list_head trc_holdout_list;
0848 struct list_head trc_blkd_node;
0849 int trc_blkd_cpu;
0850 #endif
0851
0852 struct sched_info sched_info;
0853
0854 struct list_head tasks;
0855 #ifdef CONFIG_SMP
0856 struct plist_node pushable_tasks;
0857 struct rb_node pushable_dl_tasks;
0858 #endif
0859
0860 struct mm_struct *mm;
0861 struct mm_struct *active_mm;
0862
0863
0864 struct vmacache vmacache;
0865
0866 #ifdef SPLIT_RSS_COUNTING
0867 struct task_rss_stat rss_stat;
0868 #endif
0869 int exit_state;
0870 int exit_code;
0871 int exit_signal;
0872
0873 int pdeath_signal;
0874
0875 unsigned long jobctl;
0876
0877
0878 unsigned int personality;
0879
0880
0881 unsigned sched_reset_on_fork:1;
0882 unsigned sched_contributes_to_load:1;
0883 unsigned sched_migrated:1;
0884 #ifdef CONFIG_PSI
0885 unsigned sched_psi_wake_requeue:1;
0886 #endif
0887
0888
0889 unsigned :0;
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906 unsigned sched_remote_wakeup:1;
0907
0908
0909 unsigned in_execve:1;
0910 unsigned in_iowait:1;
0911 #ifndef TIF_RESTORE_SIGMASK
0912 unsigned restore_sigmask:1;
0913 #endif
0914 #ifdef CONFIG_MEMCG
0915 unsigned in_user_fault:1;
0916 #endif
0917 #ifdef CONFIG_COMPAT_BRK
0918 unsigned brk_randomized:1;
0919 #endif
0920 #ifdef CONFIG_CGROUPS
0921
0922 unsigned no_cgroup_migration:1;
0923
0924 unsigned frozen:1;
0925 #endif
0926 #ifdef CONFIG_BLK_CGROUP
0927 unsigned use_memdelay:1;
0928 #endif
0929 #ifdef CONFIG_PSI
0930
0931 unsigned in_memstall:1;
0932 #endif
0933 #ifdef CONFIG_PAGE_OWNER
0934
0935 unsigned in_page_owner:1;
0936 #endif
0937 #ifdef CONFIG_EVENTFD
0938
0939 unsigned in_eventfd_signal:1;
0940 #endif
0941 #ifdef CONFIG_IOMMU_SVA
0942 unsigned pasid_activated:1;
0943 #endif
0944 #ifdef CONFIG_CPU_SUP_INTEL
0945 unsigned reported_split_lock:1;
0946 #endif
0947
0948 unsigned long atomic_flags;
0949
0950 struct restart_block restart_block;
0951
0952 pid_t pid;
0953 pid_t tgid;
0954
0955 #ifdef CONFIG_STACKPROTECTOR
0956
0957 unsigned long stack_canary;
0958 #endif
0959
0960
0961
0962
0963
0964
0965
0966 struct task_struct __rcu *real_parent;
0967
0968
0969 struct task_struct __rcu *parent;
0970
0971
0972
0973
0974 struct list_head children;
0975 struct list_head sibling;
0976 struct task_struct *group_leader;
0977
0978
0979
0980
0981
0982
0983
0984 struct list_head ptraced;
0985 struct list_head ptrace_entry;
0986
0987
0988 struct pid *thread_pid;
0989 struct hlist_node pid_links[PIDTYPE_MAX];
0990 struct list_head thread_group;
0991 struct list_head thread_node;
0992
0993 struct completion *vfork_done;
0994
0995
0996 int __user *set_child_tid;
0997
0998
0999 int __user *clear_child_tid;
1000
1001
1002 void *worker_private;
1003
1004 u64 utime;
1005 u64 stime;
1006 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1007 u64 utimescaled;
1008 u64 stimescaled;
1009 #endif
1010 u64 gtime;
1011 struct prev_cputime prev_cputime;
1012 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1013 struct vtime vtime;
1014 #endif
1015
1016 #ifdef CONFIG_NO_HZ_FULL
1017 atomic_t tick_dep_mask;
1018 #endif
1019
1020 unsigned long nvcsw;
1021 unsigned long nivcsw;
1022
1023
1024 u64 start_time;
1025
1026
1027 u64 start_boottime;
1028
1029
1030 unsigned long min_flt;
1031 unsigned long maj_flt;
1032
1033
1034 struct posix_cputimers posix_cputimers;
1035
1036 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1037 struct posix_cputimers_work posix_cputimers_work;
1038 #endif
1039
1040
1041
1042
1043 const struct cred __rcu *ptracer_cred;
1044
1045
1046 const struct cred __rcu *real_cred;
1047
1048
1049 const struct cred __rcu *cred;
1050
1051 #ifdef CONFIG_KEYS
1052
1053 struct key *cached_requested_key;
1054 #endif
1055
1056
1057
1058
1059
1060
1061
1062
1063 char comm[TASK_COMM_LEN];
1064
1065 struct nameidata *nameidata;
1066
1067 #ifdef CONFIG_SYSVIPC
1068 struct sysv_sem sysvsem;
1069 struct sysv_shm sysvshm;
1070 #endif
1071 #ifdef CONFIG_DETECT_HUNG_TASK
1072 unsigned long last_switch_count;
1073 unsigned long last_switch_time;
1074 #endif
1075
1076 struct fs_struct *fs;
1077
1078
1079 struct files_struct *files;
1080
1081 #ifdef CONFIG_IO_URING
1082 struct io_uring_task *io_uring;
1083 #endif
1084
1085
1086 struct nsproxy *nsproxy;
1087
1088
1089 struct signal_struct *signal;
1090 struct sighand_struct __rcu *sighand;
1091 sigset_t blocked;
1092 sigset_t real_blocked;
1093
1094 sigset_t saved_sigmask;
1095 struct sigpending pending;
1096 unsigned long sas_ss_sp;
1097 size_t sas_ss_size;
1098 unsigned int sas_ss_flags;
1099
1100 struct callback_head *task_works;
1101
1102 #ifdef CONFIG_AUDIT
1103 #ifdef CONFIG_AUDITSYSCALL
1104 struct audit_context *audit_context;
1105 #endif
1106 kuid_t loginuid;
1107 unsigned int sessionid;
1108 #endif
1109 struct seccomp seccomp;
1110 struct syscall_user_dispatch syscall_dispatch;
1111
1112
1113 u64 parent_exec_id;
1114 u64 self_exec_id;
1115
1116
1117 spinlock_t alloc_lock;
1118
1119
1120 raw_spinlock_t pi_lock;
1121
1122 struct wake_q_node wake_q;
1123
1124 #ifdef CONFIG_RT_MUTEXES
1125
1126 struct rb_root_cached pi_waiters;
1127
1128 struct task_struct *pi_top_task;
1129
1130 struct rt_mutex_waiter *pi_blocked_on;
1131 #endif
1132
1133 #ifdef CONFIG_DEBUG_MUTEXES
1134
1135 struct mutex_waiter *blocked_on;
1136 #endif
1137
1138 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1139 int non_block_count;
1140 #endif
1141
1142 #ifdef CONFIG_TRACE_IRQFLAGS
1143 struct irqtrace_events irqtrace;
1144 unsigned int hardirq_threaded;
1145 u64 hardirq_chain_key;
1146 int softirqs_enabled;
1147 int softirq_context;
1148 int irq_config;
1149 #endif
1150 #ifdef CONFIG_PREEMPT_RT
1151 int softirq_disable_cnt;
1152 #endif
1153
1154 #ifdef CONFIG_LOCKDEP
1155 # define MAX_LOCK_DEPTH 48UL
1156 u64 curr_chain_key;
1157 int lockdep_depth;
1158 unsigned int lockdep_recursion;
1159 struct held_lock held_locks[MAX_LOCK_DEPTH];
1160 #endif
1161
1162 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1163 unsigned int in_ubsan;
1164 #endif
1165
1166
1167 void *journal_info;
1168
1169
1170 struct bio_list *bio_list;
1171
1172
1173 struct blk_plug *plug;
1174
1175
1176 struct reclaim_state *reclaim_state;
1177
1178 struct backing_dev_info *backing_dev_info;
1179
1180 struct io_context *io_context;
1181
1182 #ifdef CONFIG_COMPACTION
1183 struct capture_control *capture_control;
1184 #endif
1185
1186 unsigned long ptrace_message;
1187 kernel_siginfo_t *last_siginfo;
1188
1189 struct task_io_accounting ioac;
1190 #ifdef CONFIG_PSI
1191
1192 unsigned int psi_flags;
1193 #endif
1194 #ifdef CONFIG_TASK_XACCT
1195
1196 u64 acct_rss_mem1;
1197
1198 u64 acct_vm_mem1;
1199
1200 u64 acct_timexpd;
1201 #endif
1202 #ifdef CONFIG_CPUSETS
1203
1204 nodemask_t mems_allowed;
1205
1206 seqcount_spinlock_t mems_allowed_seq;
1207 int cpuset_mem_spread_rotor;
1208 int cpuset_slab_spread_rotor;
1209 #endif
1210 #ifdef CONFIG_CGROUPS
1211
1212 struct css_set __rcu *cgroups;
1213
1214 struct list_head cg_list;
1215 #endif
1216 #ifdef CONFIG_X86_CPU_RESCTRL
1217 u32 closid;
1218 u32 rmid;
1219 #endif
1220 #ifdef CONFIG_FUTEX
1221 struct robust_list_head __user *robust_list;
1222 #ifdef CONFIG_COMPAT
1223 struct compat_robust_list_head __user *compat_robust_list;
1224 #endif
1225 struct list_head pi_state_list;
1226 struct futex_pi_state *pi_state_cache;
1227 struct mutex futex_exit_mutex;
1228 unsigned int futex_state;
1229 #endif
1230 #ifdef CONFIG_PERF_EVENTS
1231 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1232 struct mutex perf_event_mutex;
1233 struct list_head perf_event_list;
1234 #endif
1235 #ifdef CONFIG_DEBUG_PREEMPT
1236 unsigned long preempt_disable_ip;
1237 #endif
1238 #ifdef CONFIG_NUMA
1239
1240 struct mempolicy *mempolicy;
1241 short il_prev;
1242 short pref_node_fork;
1243 #endif
1244 #ifdef CONFIG_NUMA_BALANCING
1245 int numa_scan_seq;
1246 unsigned int numa_scan_period;
1247 unsigned int numa_scan_period_max;
1248 int numa_preferred_nid;
1249 unsigned long numa_migrate_retry;
1250
1251 u64 node_stamp;
1252 u64 last_task_numa_placement;
1253 u64 last_sum_exec_runtime;
1254 struct callback_head numa_work;
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264 struct numa_group __rcu *numa_group;
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280 unsigned long *numa_faults;
1281 unsigned long total_numa_faults;
1282
1283
1284
1285
1286
1287
1288
1289 unsigned long numa_faults_locality[3];
1290
1291 unsigned long numa_pages_migrated;
1292 #endif
1293
1294 #ifdef CONFIG_RSEQ
1295 struct rseq __user *rseq;
1296 u32 rseq_sig;
1297
1298
1299
1300
1301 unsigned long rseq_event_mask;
1302 #endif
1303
1304 struct tlbflush_unmap_batch tlb_ubc;
1305
1306 union {
1307 refcount_t rcu_users;
1308 struct rcu_head rcu;
1309 };
1310
1311
1312 struct pipe_inode_info *splice_pipe;
1313
1314 struct page_frag task_frag;
1315
1316 #ifdef CONFIG_TASK_DELAY_ACCT
1317 struct task_delay_info *delays;
1318 #endif
1319
1320 #ifdef CONFIG_FAULT_INJECTION
1321 int make_it_fail;
1322 unsigned int fail_nth;
1323 #endif
1324
1325
1326
1327
1328 int nr_dirtied;
1329 int nr_dirtied_pause;
1330
1331 unsigned long dirty_paused_when;
1332
1333 #ifdef CONFIG_LATENCYTOP
1334 int latency_record_count;
1335 struct latency_record latency_record[LT_SAVECOUNT];
1336 #endif
1337
1338
1339
1340
1341 u64 timer_slack_ns;
1342 u64 default_timer_slack_ns;
1343
1344 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1345 unsigned int kasan_depth;
1346 #endif
1347
1348 #ifdef CONFIG_KCSAN
1349 struct kcsan_ctx kcsan_ctx;
1350 #ifdef CONFIG_TRACE_IRQFLAGS
1351 struct irqtrace_events kcsan_save_irqtrace;
1352 #endif
1353 #ifdef CONFIG_KCSAN_WEAK_MEMORY
1354 int kcsan_stack_depth;
1355 #endif
1356 #endif
1357
1358 #if IS_ENABLED(CONFIG_KUNIT)
1359 struct kunit *kunit_test;
1360 #endif
1361
1362 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1363
1364 int curr_ret_stack;
1365 int curr_ret_depth;
1366
1367
1368 struct ftrace_ret_stack *ret_stack;
1369
1370
1371 unsigned long long ftrace_timestamp;
1372
1373
1374
1375
1376
1377 atomic_t trace_overrun;
1378
1379
1380 atomic_t tracing_graph_pause;
1381 #endif
1382
1383 #ifdef CONFIG_TRACING
1384
1385 unsigned long trace;
1386
1387
1388 unsigned long trace_recursion;
1389 #endif
1390
1391 #ifdef CONFIG_KCOV
1392
1393
1394
1395 unsigned int kcov_mode;
1396
1397
1398 unsigned int kcov_size;
1399
1400
1401 void *kcov_area;
1402
1403
1404 struct kcov *kcov;
1405
1406
1407 u64 kcov_handle;
1408
1409
1410 int kcov_sequence;
1411
1412
1413 unsigned int kcov_softirq;
1414 #endif
1415
1416 #ifdef CONFIG_MEMCG
1417 struct mem_cgroup *memcg_in_oom;
1418 gfp_t memcg_oom_gfp_mask;
1419 int memcg_oom_order;
1420
1421
1422 unsigned int memcg_nr_pages_over_high;
1423
1424
1425 struct mem_cgroup *active_memcg;
1426 #endif
1427
1428 #ifdef CONFIG_BLK_CGROUP
1429 struct request_queue *throttle_queue;
1430 #endif
1431
1432 #ifdef CONFIG_UPROBES
1433 struct uprobe_task *utask;
1434 #endif
1435 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1436 unsigned int sequential_io;
1437 unsigned int sequential_io_avg;
1438 #endif
1439 struct kmap_ctrl kmap_ctrl;
1440 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1441 unsigned long task_state_change;
1442 # ifdef CONFIG_PREEMPT_RT
1443 unsigned long saved_state_change;
1444 # endif
1445 #endif
1446 int pagefault_disabled;
1447 #ifdef CONFIG_MMU
1448 struct task_struct *oom_reaper_list;
1449 struct timer_list oom_reaper_timer;
1450 #endif
1451 #ifdef CONFIG_VMAP_STACK
1452 struct vm_struct *stack_vm_area;
1453 #endif
1454 #ifdef CONFIG_THREAD_INFO_IN_TASK
1455
1456 refcount_t stack_refcount;
1457 #endif
1458 #ifdef CONFIG_LIVEPATCH
1459 int patch_state;
1460 #endif
1461 #ifdef CONFIG_SECURITY
1462
1463 void *security;
1464 #endif
1465 #ifdef CONFIG_BPF_SYSCALL
1466
1467 struct bpf_local_storage __rcu *bpf_storage;
1468
1469 struct bpf_run_ctx *bpf_ctx;
1470 #endif
1471
1472 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1473 unsigned long lowest_stack;
1474 unsigned long prev_lowest_stack;
1475 #endif
1476
1477 #ifdef CONFIG_X86_MCE
1478 void __user *mce_vaddr;
1479 __u64 mce_kflags;
1480 u64 mce_addr;
1481 __u64 mce_ripv : 1,
1482 mce_whole_page : 1,
1483 __mce_reserved : 62;
1484 struct callback_head mce_kill_me;
1485 int mce_count;
1486 #endif
1487
1488 #ifdef CONFIG_KRETPROBES
1489 struct llist_head kretprobe_instances;
1490 #endif
1491 #ifdef CONFIG_RETHOOK
1492 struct llist_head rethooks;
1493 #endif
1494
1495 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1496
1497
1498
1499
1500
1501
1502 struct callback_head l1d_flush_kill;
1503 #endif
1504
1505 #ifdef CONFIG_RV
1506
1507
1508
1509
1510
1511
1512 union rv_task_monitor rv[RV_PER_TASK_MONITORS];
1513 #endif
1514
1515
1516
1517
1518
1519 randomized_struct_fields_end
1520
1521
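/*
 * CPU-specific state of this task.  WARNING: on some architectures
 * (e.g. x86) thread_struct ends in a variable-sized FPU buffer, so this
 * member must stay at the very end of task_struct.
 */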
1522 struct thread_struct thread;
1523
1524
1525
1526
1527
1528
1529
1530 };
1531
1532 static inline struct pid *task_pid(struct task_struct *task)
1533 {
1534 return task->thread_pid;
1535 }
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
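/*
 * Naming convention for the pid accessors below:
 *   task_xxx_nr()    - global id, i.e. the id seen from the init namespace
 *   task_xxx_vnr()   - virtual id, i.e. the id seen from the pid namespace
 *                      of current
 *   task_xxx_nr_ns() - id seen from the pid namespace specified by @ns
 */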
1548 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1549
1550 static inline pid_t task_pid_nr(struct task_struct *tsk)
1551 {
1552 return tsk->pid;
1553 }
1554
1555 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1556 {
1557 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1558 }
1559
1560 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1561 {
1562 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1563 }
1564
1565
1566 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1567 {
1568 return tsk->tgid;
1569 }
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
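/*
 * pid_alive - check that a task structure is not stale, i.e. its pid
 * links have not yet been torn down by release_task() (the task may
 * still be a zombie).
 */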
1581 static inline int pid_alive(const struct task_struct *p)
1582 {
1583 return p->thread_pid != NULL;
1584 }
1585
1586 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1587 {
1588 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1589 }
1590
1591 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1592 {
1593 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1594 }
1595
1596
1597 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1598 {
1599 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1600 }
1601
1602 static inline pid_t task_session_vnr(struct task_struct *tsk)
1603 {
1604 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1605 }
1606
1607 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1608 {
1609 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1610 }
1611
1612 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1613 {
1614 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1615 }
1616
1617 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1618 {
1619 pid_t pid = 0;
1620
1621 rcu_read_lock();
1622 if (pid_alive(tsk))
1623 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1624 rcu_read_unlock();
1625
1626 return pid;
1627 }
1628
1629 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1630 {
1631 return task_ppid_nr_ns(tsk, &init_pid_ns);
1632 }
1633
1634
1635 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1636 {
1637 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1638 }
1639
1640 #define TASK_REPORT_IDLE (TASK_REPORT + 1)
1641 #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1642
1643 static inline unsigned int __task_state_index(unsigned int tsk_state,
1644 unsigned int tsk_exit_state)
1645 {
1646 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
1647
1648 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1649
1650 if (tsk_state == TASK_IDLE)
1651 state = TASK_REPORT_IDLE;
1652
1653
1654
1655
1656
1657
1658 if (tsk_state == TASK_RTLOCK_WAIT)
1659 state = TASK_UNINTERRUPTIBLE;
1660
1661 return fls(state);
1662 }
1663
1664 static inline unsigned int task_state_index(struct task_struct *tsk)
1665 {
1666 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1667 }
1668
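/*
 * Index-to-character mapping used for the /proc state field:
 * R running, S sleeping, D uninterruptible (disk) sleep, T stopped,
 * t tracing stop, X dead, Z zombie, P parked, I idle.
 */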
1669 static inline char task_index_to_char(unsigned int state)
1670 {
1671 static const char state_char[] = "RSDTtXZPI";
1672
1673 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1674
1675 return state_char[state];
1676 }
1677
1678 static inline char task_state_to_char(struct task_struct *tsk)
1679 {
1680 return task_index_to_char(task_state_index(tsk));
1681 }
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
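/*
 * is_global_init - check whether a task is the init process (tgid 1 in
 * the initial pid namespace); init may have sub-threads, hence the tgid
 * check rather than a pid check.
 */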
1692 static inline int is_global_init(struct task_struct *tsk)
1693 {
1694 return task_tgid_nr(tsk) == 1;
1695 }
1696
1697 extern struct pid *cad_pid;
1698
1699
1700
1701
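/* Per-process flags, stored in tsk->flags: */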
1702 #define PF_VCPU 0x00000001
1703 #define PF_IDLE 0x00000002
1704 #define PF_EXITING 0x00000004
1705 #define PF_POSTCOREDUMP 0x00000008
1706 #define PF_IO_WORKER 0x00000010
1707 #define PF_WQ_WORKER 0x00000020
1708 #define PF_FORKNOEXEC 0x00000040
1709 #define PF_MCE_PROCESS 0x00000080
1710 #define PF_SUPERPRIV 0x00000100
1711 #define PF_DUMPCORE 0x00000200
1712 #define PF_SIGNALED 0x00000400
1713 #define PF_MEMALLOC 0x00000800
1714 #define PF_NPROC_EXCEEDED 0x00001000
1715 #define PF_USED_MATH 0x00002000
1716 #define PF_NOFREEZE 0x00008000
1717 #define PF_FROZEN 0x00010000
1718 #define PF_KSWAPD 0x00020000
1719 #define PF_MEMALLOC_NOFS 0x00040000
1720 #define PF_MEMALLOC_NOIO 0x00080000
1721 #define PF_LOCAL_THROTTLE 0x00100000
1722
1723 #define PF_KTHREAD 0x00200000
1724 #define PF_RANDOMIZE 0x00400000
1725 #define PF_NO_SETAFFINITY 0x04000000
1726 #define PF_MCE_EARLY 0x08000000
1727 #define PF_MEMALLOC_PIN 0x10000000
1728 #define PF_FREEZER_SKIP 0x40000000
1729 #define PF_SUSPEND_TASK 0x80000000
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1743 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1744 #define clear_used_math() clear_stopped_child_used_math(current)
1745 #define set_used_math() set_stopped_child_used_math(current)
1746
1747 #define conditional_stopped_child_used_math(condition, child) \
1748 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1749
1750 #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1751
1752 #define copy_to_stopped_child_used_math(child) \
1753 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1754
1755
1756 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1757 #define used_math() tsk_used_math(current)
1758
1759 static __always_inline bool is_percpu_thread(void)
1760 {
1761 #ifdef CONFIG_SMP
1762 return (current->flags & PF_NO_SETAFFINITY) &&
1763 (current->nr_cpus_allowed == 1);
1764 #else
1765 return true;
1766 #endif
1767 }
1768
1769
1770 #define PFA_NO_NEW_PRIVS 0
1771 #define PFA_SPREAD_PAGE 1
1772 #define PFA_SPREAD_SLAB 2
1773 #define PFA_SPEC_SSB_DISABLE 3
1774 #define PFA_SPEC_SSB_FORCE_DISABLE 4
1775 #define PFA_SPEC_IB_DISABLE 5
1776 #define PFA_SPEC_IB_FORCE_DISABLE 6
1777 #define PFA_SPEC_SSB_NOEXEC 7
1778
1779 #define TASK_PFA_TEST(name, func) \
1780 static inline bool task_##func(struct task_struct *p) \
1781 { return test_bit(PFA_##name, &p->atomic_flags); }
1782
1783 #define TASK_PFA_SET(name, func) \
1784 static inline void task_set_##func(struct task_struct *p) \
1785 { set_bit(PFA_##name, &p->atomic_flags); }
1786
1787 #define TASK_PFA_CLEAR(name, func) \
1788 static inline void task_clear_##func(struct task_struct *p) \
1789 { clear_bit(PFA_##name, &p->atomic_flags); }
1790
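/*
 * The TASK_PFA_* macros generate accessors over tsk->atomic_flags, e.g.
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) defines task_no_new_privs(p).
 * Note that some flags deliberately get no CLEAR accessor: once set they
 * stick for the lifetime of the task.
 */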
1791 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1792 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1793
1794 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1795 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1796 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1797
1798 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1799 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1800 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1801
1802 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1803 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1804 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1805
1806 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1807 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1808 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1809
1810 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1811 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1812
1813 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1814 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1815 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1816
1817 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1818 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1819
1820 static inline void
1821 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1822 {
1823 current->flags &= ~flags;
1824 current->flags |= orig_flags & flags;
1825 }
1826
1827 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1828 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
1829 #ifdef CONFIG_SMP
1830 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1831 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1832 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1833 extern void release_user_cpus_ptr(struct task_struct *p);
1834 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1835 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1836 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1837 #else
1838 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1839 {
1840 }
1841 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1842 {
1843 if (!cpumask_test_cpu(0, new_mask))
1844 return -EINVAL;
1845 return 0;
1846 }
1847 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1848 {
1849 if (src->user_cpus_ptr)
1850 return -EINVAL;
1851 return 0;
1852 }
1853 static inline void release_user_cpus_ptr(struct task_struct *p)
1854 {
1855 WARN_ON(p->user_cpus_ptr);
1856 }
1857
1858 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1859 {
1860 return 0;
1861 }
1862 #endif
1863
1864 extern int yield_to(struct task_struct *p, bool preempt);
1865 extern void set_user_nice(struct task_struct *p, long nice);
1866 extern int task_prio(const struct task_struct *p);
1867
1868
1869
1870
1871
1872
1873
1874 static inline int task_nice(const struct task_struct *p)
1875 {
1876 return PRIO_TO_NICE((p)->static_prio);
1877 }
1878
1879 extern int can_nice(const struct task_struct *p, const int nice);
1880 extern int task_curr(const struct task_struct *p);
1881 extern int idle_cpu(int cpu);
1882 extern int available_idle_cpu(int cpu);
1883 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1884 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1885 extern void sched_set_fifo(struct task_struct *p);
1886 extern void sched_set_fifo_low(struct task_struct *p);
1887 extern void sched_set_normal(struct task_struct *p, int nice);
1888 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1889 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1890 extern struct task_struct *idle_task(int cpu);
1891
1892
1893
1894
1895
1896
1897
1898 static __always_inline bool is_idle_task(const struct task_struct *p)
1899 {
1900 return !!(p->flags & PF_IDLE);
1901 }
1902
1903 extern struct task_struct *curr_task(int cpu);
1904 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1905
1906 void yield(void);
1907
1908 union thread_union {
1909 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1910 struct task_struct task;
1911 #endif
1912 #ifndef CONFIG_THREAD_INFO_IN_TASK
1913 struct thread_info thread_info;
1914 #endif
1915 unsigned long stack[THREAD_SIZE/sizeof(long)];
1916 };
1917
1918 #ifndef CONFIG_THREAD_INFO_IN_TASK
1919 extern struct thread_info init_thread_info;
1920 #endif
1921
1922 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1923
1924 #ifdef CONFIG_THREAD_INFO_IN_TASK
1925 # define task_thread_info(task) (&(task)->thread_info)
1926 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1927 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1928 #endif
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941 extern struct task_struct *find_task_by_vpid(pid_t nr);
1942 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1943
1944
1945
1946
1947 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1948
1949 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1950 extern int wake_up_process(struct task_struct *tsk);
1951 extern void wake_up_new_task(struct task_struct *tsk);
1952
1953 #ifdef CONFIG_SMP
1954 extern void kick_process(struct task_struct *tsk);
1955 #else
1956 static inline void kick_process(struct task_struct *tsk) { }
1957 #endif
1958
1959 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1960
1961 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1962 {
1963 __set_task_comm(tsk, from, false);
1964 }
1965
1966 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1967 #define get_task_comm(buf, tsk) ({ \
1968 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1969 __get_task_comm(buf, sizeof(buf), tsk); \
1970 })
1971
1972 #ifdef CONFIG_SMP
1973 static __always_inline void scheduler_ipi(void)
1974 {
1975
1976
1977
1978
1979
1980 preempt_fold_need_resched();
1981 }
1982 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
1983 #else
1984 static inline void scheduler_ipi(void) { }
1985 static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
1986 {
1987 return 1;
1988 }
1989 #endif
1990
1991
1992
1993
1994
1995 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1996 {
1997 set_ti_thread_flag(task_thread_info(tsk), flag);
1998 }
1999
2000 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2001 {
2002 clear_ti_thread_flag(task_thread_info(tsk), flag);
2003 }
2004
2005 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
2006 bool value)
2007 {
2008 update_ti_thread_flag(task_thread_info(tsk), flag, value);
2009 }
2010
2011 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2012 {
2013 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2014 }
2015
2016 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2017 {
2018 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2019 }
2020
2021 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2022 {
2023 return test_ti_thread_flag(task_thread_info(tsk), flag);
2024 }
2025
2026 static inline void set_tsk_need_resched(struct task_struct *tsk)
2027 {
2028 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2029 }
2030
2031 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2032 {
2033 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2034 }
2035
2036 static inline int test_tsk_need_resched(struct task_struct *tsk)
2037 {
2038 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2039 }
2040
2041
2042
2043
2044
2045
2046
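/*
 * cond_resched() and cond_resched_lock(): latency reduction via explicit
 * rescheduling in places that are safe.  With PREEMPT_DYNAMIC the call is
 * routed through a static call or static key so the chosen preemption
 * model can patch it in or out at boot time.
 */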
2047 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2048 extern int __cond_resched(void);
2049
2050 #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
2051
2052 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2053
2054 static __always_inline int _cond_resched(void)
2055 {
2056 return static_call_mod(cond_resched)();
2057 }
2058
2059 #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
2060 extern int dynamic_cond_resched(void);
2061
2062 static __always_inline int _cond_resched(void)
2063 {
2064 return dynamic_cond_resched();
2065 }
2066
2067 #else
2068
2069 static inline int _cond_resched(void)
2070 {
2071 return __cond_resched();
2072 }
2073
2074 #endif
2075
2076 #else
2077
2078 static inline int _cond_resched(void) { return 0; }
2079
2080 #endif
2081
2082 #define cond_resched() ({ \
2083 __might_resched(__FILE__, __LINE__, 0); \
2084 _cond_resched(); \
2085 })
2086
2087 extern int __cond_resched_lock(spinlock_t *lock);
2088 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2089 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2090
2091 #define MIGHT_RESCHED_RCU_SHIFT 8
2092 #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2093
2094 #ifndef CONFIG_PREEMPT_RT
2095
2096
2097
2098
2099 # define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
2100 #else
2101
2102
2103
2104
2105
2106 # define PREEMPT_LOCK_RESCHED_OFFSETS \
2107 (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
2108 #endif
2109
2110 #define cond_resched_lock(lock) ({ \
2111 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2112 __cond_resched_lock(lock); \
2113 })
2114
2115 #define cond_resched_rwlock_read(lock) ({ \
2116 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2117 __cond_resched_rwlock_read(lock); \
2118 })
2119
2120 #define cond_resched_rwlock_write(lock) ({ \
2121 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2122 __cond_resched_rwlock_write(lock); \
2123 })
2124
2125 static inline void cond_resched_rcu(void)
2126 {
2127 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2128 rcu_read_unlock();
2129 cond_resched();
2130 rcu_read_lock();
2131 #endif
2132 }
2133
2134 #ifdef CONFIG_PREEMPT_DYNAMIC
2135
2136 extern bool preempt_model_none(void);
2137 extern bool preempt_model_voluntary(void);
2138 extern bool preempt_model_full(void);
2139
2140 #else
2141
2142 static inline bool preempt_model_none(void)
2143 {
2144 return IS_ENABLED(CONFIG_PREEMPT_NONE);
2145 }
2146 static inline bool preempt_model_voluntary(void)
2147 {
2148 return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
2149 }
2150 static inline bool preempt_model_full(void)
2151 {
2152 return IS_ENABLED(CONFIG_PREEMPT);
2153 }
2154
2155 #endif
2156
2157 static inline bool preempt_model_rt(void)
2158 {
2159 return IS_ENABLED(CONFIG_PREEMPT_RT);
2160 }
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170 static inline bool preempt_model_preemptible(void)
2171 {
2172 return preempt_model_full() || preempt_model_rt();
2173 }
2174
2175
2176
2177
2178
2179
2180 static inline int spin_needbreak(spinlock_t *lock)
2181 {
2182 #ifdef CONFIG_PREEMPTION
2183 return spin_is_contended(lock);
2184 #else
2185 return 0;
2186 #endif
2187 }
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197 static inline int rwlock_needbreak(rwlock_t *lock)
2198 {
2199 #ifdef CONFIG_PREEMPTION
2200 return rwlock_is_contended(lock);
2201 #else
2202 return 0;
2203 #endif
2204 }
2205
2206 static __always_inline bool need_resched(void)
2207 {
2208 return unlikely(tif_need_resched());
2209 }
2210
2211
2212
2213
2214 #ifdef CONFIG_SMP
2215
2216 static inline unsigned int task_cpu(const struct task_struct *p)
2217 {
2218 return READ_ONCE(task_thread_info(p)->cpu);
2219 }
2220
2221 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2222
2223 #else
2224
2225 static inline unsigned int task_cpu(const struct task_struct *p)
2226 {
2227 return 0;
2228 }
2229
2230 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2231 {
2232 }
2233
2234 #endif
2235
2236 extern bool sched_task_on_rq(struct task_struct *p);
2237 extern unsigned long get_wchan(struct task_struct *p);
2238 extern struct task_struct *cpu_curr_snapshot(int cpu);
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248 #ifndef vcpu_is_preempted
2249 static inline bool vcpu_is_preempted(int cpu)
2250 {
2251 return false;
2252 }
2253 #endif
2254
2255 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2256 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2257
2258 #ifndef TASK_SIZE_OF
2259 #define TASK_SIZE_OF(tsk) TASK_SIZE
2260 #endif
2261
2262 #ifdef CONFIG_SMP
2263 static inline bool owner_on_cpu(struct task_struct *owner)
2264 {
2265
2266
2267
2268
2269 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2270 }
2271
2272
2273 unsigned long sched_cpu_util(int cpu);
2274 #endif
2275
2276 #ifdef CONFIG_RSEQ
2277
2278
2279
2280
2281
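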
2282 enum rseq_event_mask_bits {
2283 RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2284 RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2285 RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2286 };
2287
2288 enum rseq_event_mask {
2289 RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
2290 RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
2291 RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
2292 };
2293
2294 static inline void rseq_set_notify_resume(struct task_struct *t)
2295 {
2296 if (t->rseq)
2297 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2298 }
2299
2300 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2301
2302 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2303 struct pt_regs *regs)
2304 {
2305 if (current->rseq)
2306 __rseq_handle_notify_resume(ksig, regs);
2307 }
2308
2309 static inline void rseq_signal_deliver(struct ksignal *ksig,
2310 struct pt_regs *regs)
2311 {
2312 preempt_disable();
2313 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2314 preempt_enable();
2315 rseq_handle_notify_resume(ksig, regs);
2316 }
2317
2318
2319 static inline void rseq_preempt(struct task_struct *t)
2320 {
2321 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2322 rseq_set_notify_resume(t);
2323 }
2324
2325
2326 static inline void rseq_migrate(struct task_struct *t)
2327 {
2328 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2329 rseq_set_notify_resume(t);
2330 }
2331
2332
2333
2334
2335
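/*
 * A registered rseq area is inherited only when forking a new process;
 * creating a thread (CLONE_VM) starts with cleared rseq state, since the
 * area is per-thread and must be registered by the new thread itself.
 */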
2336 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2337 {
2338 if (clone_flags & CLONE_VM) {
2339 t->rseq = NULL;
2340 t->rseq_sig = 0;
2341 t->rseq_event_mask = 0;
2342 } else {
2343 t->rseq = current->rseq;
2344 t->rseq_sig = current->rseq_sig;
2345 t->rseq_event_mask = current->rseq_event_mask;
2346 }
2347 }
2348
2349 static inline void rseq_execve(struct task_struct *t)
2350 {
2351 t->rseq = NULL;
2352 t->rseq_sig = 0;
2353 t->rseq_event_mask = 0;
2354 }
2355
2356 #else
2357
2358 static inline void rseq_set_notify_resume(struct task_struct *t)
2359 {
2360 }
2361 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2362 struct pt_regs *regs)
2363 {
2364 }
2365 static inline void rseq_signal_deliver(struct ksignal *ksig,
2366 struct pt_regs *regs)
2367 {
2368 }
2369 static inline void rseq_preempt(struct task_struct *t)
2370 {
2371 }
2372 static inline void rseq_migrate(struct task_struct *t)
2373 {
2374 }
2375 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2376 {
2377 }
2378 static inline void rseq_execve(struct task_struct *t)
2379 {
2380 }
2381
2382 #endif
2383
2384 #ifdef CONFIG_DEBUG_RSEQ
2385
2386 void rseq_syscall(struct pt_regs *regs);
2387
2388 #else
2389
2390 static inline void rseq_syscall(struct pt_regs *regs)
2391 {
2392 }
2393
2394 #endif
2395
2396 #ifdef CONFIG_SCHED_CORE
2397 extern void sched_core_free(struct task_struct *tsk);
2398 extern void sched_core_fork(struct task_struct *p);
2399 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2400 unsigned long uaddr);
2401 #else
2402 static inline void sched_core_free(struct task_struct *tsk) { }
2403 static inline void sched_core_fork(struct task_struct *p) { }
2404 #endif
2405
2406 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2407
2408 #endif