/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#include <linux/sched.h>

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif

/*
 * Common vtime APIs
 */
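/*
 * Roughly (see the accounting backends, e.g. kernel/sched/cputime.c, for
 * the exact semantics): vtime_account_kernel() charges the time elapsed
 * since the last accounting point to the task as system time (or guest
 * time while it is running vCPU work), and vtime_account_idle() charges
 * it as idle time.
 */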
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
extern void vtime_account_softirq(struct task_struct *tsk);
extern void vtime_account_hardirq(struct task_struct *tsk);
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
static inline void vtime_account_softirq(struct task_struct *tsk) { }
static inline void vtime_account_hardirq(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif

/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 */
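/*
 * Three cases follow: with CONFIG_VIRT_CPU_ACCOUNTING_NATIVE the
 * architecture accounts time on every kernel entry/exit, so vtime is
 * unconditionally enabled; with CONFIG_VIRT_CPU_ACCOUNTING_GEN vtime
 * follows the per-CPU context tracking state (typically enabled on
 * nohz_full CPUs); with neither, accounting is tick based and vtime is
 * never enabled.
 */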
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)

static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);

static __always_inline void vtime_account_guest_enter(void)
{
	vtime_account_kernel(current);
	current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
}
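
/*
 * Illustrative sketch only (not part of this header): a hypervisor's vcpu
 * run loop is expected to bracket guest execution with these helpers
 * (typically with IRQs disabled, via the generic guest entry/exit
 * wrappers).  hypothetical_run_guest() is a made-up stand-in:
 *
 *	vtime_account_guest_enter();	// account time so far, start guest accounting
 *	hypothetical_run_guest(vcpu);	// time spent here is charged as guest time
 *	vtime_account_guest_exit();	// account the guest stint, back to kernel time
 */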

#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)

/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_enabled();
}

static inline bool vtime_accounting_enabled_cpu(int cpu)
{
	return context_tracking_enabled_cpu(cpu);
}

static inline bool vtime_accounting_enabled_this_cpu(void)
{
	return context_tracking_enabled_this_cpu();
}
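
/*
 * Illustrative sketch only (not part of this header): a cputime reader is
 * expected to use these predicates to decide whether the target CPU keeps
 * tickless vtime and only then take the vtime-aware path.  The helper name
 * below is hypothetical; the real readers live in kernel/sched/cputime.c:
 *
 *	if (vtime_accounting_enabled_cpu(cpu))
 *		utime = hypothetical_fetch_vtime_utime(tsk);	// tickless value
 *	else
 *		utime = tsk->utime;	// tick-accounted value is already up to date
 */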

extern void vtime_task_switch_generic(struct task_struct *prev);

static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_task_switch_generic(prev);
}

static __always_inline void vtime_account_guest_enter(void)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }

static __always_inline void vtime_account_guest_enter(void)
{
	current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
	current->flags &= ~PF_VCPU;
}

#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif

static inline void account_softirq_enter(struct task_struct *tsk)
{
	vtime_account_irq(tsk, SOFTIRQ_OFFSET);
	irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
}

static inline void account_softirq_exit(struct task_struct *tsk)
{
	vtime_account_softirq(tsk);
	irqtime_account_irq(tsk, 0);
}

static inline void account_hardirq_enter(struct task_struct *tsk)
{
	vtime_account_irq(tsk, HARDIRQ_OFFSET);
	irqtime_account_irq(tsk, HARDIRQ_OFFSET);
}

static inline void account_hardirq_exit(struct task_struct *tsk)
{
	vtime_account_hardirq(tsk);
	irqtime_account_irq(tsk, 0);
}
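
/*
 * Illustrative sketch only (not part of this header): the irq entry/exit
 * paths are expected to pair these helpers around interrupt handling so
 * that both vtime and IRQ-time accounting see the transition:
 *
 *	account_hardirq_enter(current);	// entering hardirq context
 *	hypothetical_handle_irq();	// made-up stand-in for the handler dispatch
 *	account_hardirq_exit(current);	// leaving hardirq context
 *
 * account_softirq_enter()/account_softirq_exit() play the same role around
 * softirq processing.
 */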

#endif /* _LINUX_KERNEL_VTIME_H */