/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>

/*
 * cputime accounting APIs:
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>

/*
 * Generic fallback for architectures whose <asm/cputime.h> provides
 * cputime_to_usecs() but no native cputime_to_nsecs(): convert via
 * microseconds and scale up to nanoseconds.
 */
#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct) \
    (cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/*
 * With generic vtime accounting the utime/stime snapshot may need to be
 * fetched from the vtime machinery; implemented out of line.
 */
extern bool task_cputime(struct task_struct *t,
             u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
/*
 * Snapshot @t's user and system time into *@utime and *@stime.
 *
 * Without CONFIG_VIRT_CPU_ACCOUNTING_GEN the task_struct fields are
 * authoritative, so this simply copies them. The false return mirrors
 * the out-of-line variant's return value; here there is never any
 * in-flight vtime delta to report.
 */
static inline bool task_cputime(struct task_struct *t,
                u64 *utime, u64 *stime)
{
    *utime = t->utime;
    *stime = t->stime;
    return false;
}

/* Guest time accrued by @t (virtualized CPU time). */
static inline u64 task_gtime(struct task_struct *t)
{
    return t->gtime;
}
#endif
0038 
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
/*
 * Snapshot @t's frequency-scaled user/system time. Only architectures
 * that maintain the utimescaled/stimescaled fields select
 * CONFIG_ARCH_HAS_SCALED_CPUTIME.
 */
static inline void task_cputime_scaled(struct task_struct *t,
                       u64 *utimescaled,
                       u64 *stimescaled)
{
    *utimescaled = t->utimescaled;
    *stimescaled = t->stimescaled;
}
#else
/*
 * No scaled cputime on this architecture: scaled time is identical to
 * plain cputime, so fall back to task_cputime(). Its return value is
 * intentionally ignored here.
 */
static inline void task_cputime_scaled(struct task_struct *t,
                       u64 *utimescaled,
                       u64 *stimescaled)
{
    task_cputime(t, utimescaled, stimescaled);
}
#endif
0055 
/*
 * Adjusted cputime accessors: report utime/stime after the
 * cputime_adjust() smoothing pass, for a single task or for a whole
 * thread group. Implemented in kernel/sched/cputime.c.
 */
extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
               u64 *ut, u64 *st);

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);
0066 
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active
 *
 * @tsk:    Pointer to target task.
 *
 * Return: the thread group cputimer when POSIX CPU timers are active and
 * the task has not yet passed __exit_signal(), otherwise NULL.
 */
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
    struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

    /*
     * Check whether posix CPU timers are active. If not the thread
     * group accounting is not active either. Lockless check.
     */
    if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
        return NULL;

    /*
     * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
     * in __exit_signal(), we won't account to the signal struct further
     * cputime consumed by that task, even though the task can still be
     * ticking after __exit_signal().
     *
     * In order to keep a consistent behaviour between thread group cputime
     * and thread group cputimer accounting, lets also ignore the cputime
     * elapsing after __exit_signal() in any thread group timer running.
     *
     * This makes sure that POSIX CPU clocks and timers are synchronized, so
     * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
     * clock delta is behind the expiring timer value.
     */
    if (unlikely(!tsk->sighand))
        return NULL;

    return cputimer;
}
#else
/* Without POSIX timers there is never a running group cputimer. */
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
    return NULL;
}
#endif
0117 
0118 /**
0119  * account_group_user_time - Maintain utime for a thread group.
0120  *
0121  * @tsk:    Pointer to task structure.
0122  * @cputime:    Time value by which to increment the utime field of the
0123  *      thread_group_cputime structure.
0124  *
0125  * If thread group time is being maintained, get the structure for the
0126  * running CPU and update the utime field there.
0127  */
0128 static inline void account_group_user_time(struct task_struct *tsk,
0129                        u64 cputime)
0130 {
0131     struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
0132 
0133     if (!cputimer)
0134         return;
0135 
0136     atomic64_add(cputime, &cputimer->cputime_atomic.utime);
0137 }
0138 
0139 /**
0140  * account_group_system_time - Maintain stime for a thread group.
0141  *
0142  * @tsk:    Pointer to task structure.
0143  * @cputime:    Time value by which to increment the stime field of the
0144  *      thread_group_cputime structure.
0145  *
0146  * If thread group time is being maintained, get the structure for the
0147  * running CPU and update the stime field there.
0148  */
0149 static inline void account_group_system_time(struct task_struct *tsk,
0150                          u64 cputime)
0151 {
0152     struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
0153 
0154     if (!cputimer)
0155         return;
0156 
0157     atomic64_add(cputime, &cputimer->cputime_atomic.stime);
0158 }
0159 
0160 /**
0161  * account_group_exec_runtime - Maintain exec runtime for a thread group.
0162  *
0163  * @tsk:    Pointer to task structure.
0164  * @ns:     Time value by which to increment the sum_exec_runtime field
0165  *      of the thread_group_cputime structure.
0166  *
0167  * If thread group time is being maintained, get the structure for the
0168  * running CPU and update the sum_exec_runtime field there.
0169  */
0170 static inline void account_group_exec_runtime(struct task_struct *tsk,
0171                           unsigned long long ns)
0172 {
0173     struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
0174 
0175     if (!cputimer)
0176         return;
0177 
0178     atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
0179 }
0180 
0181 static inline void prev_cputime_init(struct prev_cputime *prev)
0182 {
0183 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
0184     prev->utime = prev->stime = 0;
0185     raw_spin_lock_init(&prev->lock);
0186 #endif
0187 }
0188 
/*
 * Total scheduler runtime consumed by @task, in nanoseconds per the
 * name's _runtime convention — presumably sum_exec_runtime including
 * any currently-running delta; confirm against kernel/sched/cputime.c.
 */
extern unsigned long long
task_sched_runtime(struct task_struct *task);

#endif /* _LINUX_SCHED_CPUTIME_H */