/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>
#include <linux/uaccess.h>

struct task_struct;
struct rusage;
union thread_union;
struct css_set;

/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL
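/*
 * Arguments for kernel_clone(); filled in by the user-facing entry points
 * (fork, vfork, clone, clone3) and by in-kernel helpers such as
 * kernel_thread() before calling kernel_clone().
 */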
struct kernel_clone_args {
	u64 flags;
	int __user *pidfd;
	int __user *child_tid;
	int __user *parent_tid;
	int exit_signal;
	unsigned long stack;
	unsigned long stack_size;
	unsigned long tls;
	pid_t *set_tid;
	/* Number of elements in *set_tid */
	size_t set_tid_size;
	int cgroup;
	int io_thread;
	int kthread;
	int idle;
	int (*fn)(void *);
	void *fn_arg;
	struct cgroup *cgrp;
	struct css_set *cset;
};

/*
 * tasklist_lock protects the global task list and the parent/child links
 * maintained across fork()/exit(); mmlist_lock protects the list of
 * possibly-swapped mm_structs strung off init_mm.mmlist.
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern int lockdep_tasklist_lock_is_held(void);

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

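/* fork()/clone()-time setup and teardown of scheduler state: */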
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);
void __noreturn make_task_dead(int signr);

extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct *p);

extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);

extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern __noreturn void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct task_struct *);

extern pid_t kernel_clone(struct kernel_clone_args *kargs);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);
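
/*
 * Sketch of how an in-kernel caller drives kernel_clone(); this mirrors
 * what kernel_thread() does in kernel/fork.c (field values illustrative):
 *
 *	struct kernel_clone_args args = {
 *		.flags		= CLONE_VM | CLONE_UNTRACED,
 *		.exit_signal	= 0,
 *		.fn		= fn,
 *		.fn_arg		= arg,
 *		.kthread	= 1,
 *	};
 *	pid_t pid = kernel_clone(&args);
 */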

extern void free_task(struct task_struct *tsk);

/* sched_exec() is called by processes performing an exec: */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

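/*
 * Grab a reference to @t; the caller must already hold one, or otherwise
 * know the task cannot go away (e.g. inside an RCU read-side section).
 */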
static inline struct task_struct *get_task_struct(struct task_struct *t)
{
	refcount_inc(&t->usage);
	return t;
}

extern void __put_task_struct(struct task_struct *t);

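/* Drop a reference; the final put frees the task_struct. */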
static inline void put_task_struct(struct task_struct *t)
{
	if (refcount_dec_and_test(&t->usage))
		__put_task_struct(t);
}
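
/*
 * Typical get/put pairing (sketch): look the task up under RCU, pin it,
 * then use it after dropping the RCU read lock:
 *
 *	rcu_read_lock();
 *	t = find_task_by_vpid(pid);
 *	if (t)
 *		get_task_struct(t);
 *	rcu_read_unlock();
 *	...
 *	put_task_struct(t);
 */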
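/* Drop @nr references at once. */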
static inline void put_task_struct_many(struct task_struct *t, int nr)
{
	if (refcount_sub_and_test(nr, &t->usage))
		__put_task_struct(t);
}
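/*
 * Drop a reference on @task->rcu_users; once it hits zero the task is
 * freed after an RCU grace period.
 */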
void put_task_struct_rcu_user(struct task_struct *task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist we
 * must assume something there may need to be copied to userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = 0;
	/* Handle dynamically sized thread_struct. */
	*size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

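/*
 * Example (sketch): taking a stable snapshot of ->comm, similar to what
 * get_task_comm() does:
 *
 *	task_lock(p);
 *	strscpy(buf, p->comm, sizeof(buf));
 *	task_unlock(p);
 */
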
#endif /* _LINUX_SCHED_TASK_H */