#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>
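
/*
 * cputime accounting APIs:
 */
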
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>

#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct)	\
	(cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern bool task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline bool task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
	return false;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
			   u64 *ut, u64 *st);
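
/*
 * Thread group CPU time accounting:
 */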
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);
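
/*
 * The following helpers support scheduler-internal time accounting for the
 * thread group cputimer; they are typically called from the tick and
 * cputime accounting paths.
 */

/*
 * get_running_cputimer - return &tsk->signal->cputimer if the group's
 * POSIX CPU timers are active, NULL otherwise.
 */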
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
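
	/*
	 * Lockless check whether any POSIX CPU timer is armed for this
	 * thread group; if not, group-wide cputime accounting is not
	 * active either.
	 */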
	if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
		return NULL;
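
	/*
	 * After __exit_signal() the task's cputime is no longer folded
	 * into the signal struct, so stop charging the group cputimer as
	 * well to keep thread group cputime and cputimer accounting
	 * consistent.
	 */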
	if (unlikely(!tsk->sighand))
		return NULL;

	return cputimer;
}
#else
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	return NULL;
}
#endif
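
/**
 * account_group_user_time - Maintain utime for a thread group.
 * @tsk:	Pointer to the task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread group cputime structure.
 *
 * If the thread group cputimer is running, add @cputime to its utime total.
 */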
static inline void account_group_user_time(struct task_struct *tsk,
					   u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}
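
/**
 * account_group_system_time - Maintain stime for a thread group.
 * @tsk:	Pointer to the task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread group cputime structure.
 *
 * If the thread group cputimer is running, add @cputime to its stime total.
 */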
static inline void account_group_system_time(struct task_struct *tsk,
					     u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}
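
/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 * @tsk:	Pointer to the task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread group cputime structure.
 *
 * If the thread group cputimer is running, add @ns to its sum_exec_runtime
 * total.
 */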
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
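
/*
 * Reset the prev_cputime snapshot that cputime_adjust() uses to keep the
 * reported utime/stime values monotonic.
 */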
static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}
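
/*
 * Returns the task's cumulative execution runtime in nanoseconds, including
 * runtime that has not been accounted yet if the task is currently running.
 */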
extern unsigned long long
task_sched_runtime(struct task_struct *task);

#endif /* _LINUX_SCHED_CPUTIME_H */