/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
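/*
 * vDSO gettimeofday support for AArch32 (compat) tasks: syscall fallbacks
 * plus the clocksource and data-page hooks consumed by the generic vDSO
 * library.
 */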
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#include <asm/vdso/compat_barrier.h>

#define VDSO_HAS_CLOCK_GETRES       1

#define BUILD_VDSO32            1
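
/*
 * Each *_fallback helper below invokes the corresponding compat syscall
 * directly, using the AArch32 EABI convention: syscall number in r7,
 * arguments in r0/r1, "swi #0" to trap into the kernel, result returned
 * in r0. Note that r0 is bound both to the first argument and to the
 * output operand, and the "memory" clobber stops the compiler from
 * caching values the kernel writes through the pointer arguments.
 */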
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
                          struct timezone *_tz)
{
    register struct timezone *tz asm("r1") = _tz;
    register struct __kernel_old_timeval *tv asm("r0") = _tv;
    register long ret asm("r0");
    register long nr asm("r7") = __NR_compat_gettimeofday;

    asm volatile(
    "   swi #0\n"
    : "=r" (ret)
    : "r" (tv), "r" (tz), "r" (nr)
    : "memory");

    return ret;
}

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
    register struct __kernel_timespec *ts asm("r1") = _ts;
    register clockid_t clkid asm("r0") = _clkid;
    register long ret asm("r0");
    register long nr asm("r7") = __NR_compat_clock_gettime64;

    asm volatile(
    "   swi #0\n"
    : "=r" (ret)
    : "r" (clkid), "r" (ts), "r" (nr)
    : "memory");

    return ret;
}

static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
    register struct old_timespec32 *ts asm("r1") = _ts;
    register clockid_t clkid asm("r0") = _clkid;
    register long ret asm("r0");
    register long nr asm("r7") = __NR_compat_clock_gettime;

    asm volatile(
    "   swi #0\n"
    : "=r" (ret)
    : "r" (clkid), "r" (ts), "r" (nr)
    : "memory");

    return ret;
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
    register struct __kernel_timespec *ts asm("r1") = _ts;
    register clockid_t clkid asm("r0") = _clkid;
    register long ret asm("r0");
    register long nr asm("r7") = __NR_compat_clock_getres_time64;

    asm volatile(
    "   swi #0\n"
    : "=r" (ret)
    : "r" (clkid), "r" (ts), "r" (nr)
    : "memory");

    return ret;
}

static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
    register struct old_timespec32 *ts asm("r1") = _ts;
    register clockid_t clkid asm("r0") = _clkid;
    register long ret asm("r0");
    register long nr asm("r7") = __NR_compat_clock_getres;

    asm volatile(
    "   swi #0\n"
    : "=r" (ret)
    : "r" (clkid), "r" (ts), "r" (nr)
    : "memory");

    return ret;
}
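
/*
 * The five helpers above differ only in syscall number and argument type:
 * the 64-bit time interfaces (struct __kernel_timespec) use the *_time64
 * compat syscalls, while the old_timespec32 variants use the legacy ones.
 */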

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
                                                 const struct vdso_data *vd)
{
    u64 res;

    /*
     * The core checks the mode before calling this, so getting here means
     * we raced with a concurrent update. Return anything; the core will
     * do another round, see the mode change and fall back to the syscall.
     */
    if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
        return 0;

    /*
     * This isb() is required to prevent the counter read from being
     * speculated ahead of this point.
     */
    isb();
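    /*
     * MRRC p15, 1, <lo>, <hi>, c14 is the AArch32 encoding of a 64-bit
     * read of CNTVCT, the architected timer's virtual counter.
     */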
    asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
    /*
     * This isb() is required to prevent the subsequent sequence-counter
     * re-read from being speculated ahead of the counter read above.
     */
    isb();

    return res;
}

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
    const struct vdso_data *ret;

    /*
     * This simply puts &_vdso_data into ret. The reason we don't use
     * `ret = _vdso_data` is that the compiler tends to optimise this in a
     * very suboptimal way: instead of keeping &_vdso_data in a register,
     * it goes through a relocation almost every time _vdso_data must be
     * accessed (even in subfunctions). This is both time- and
     * space-consuming: each relocation uses a word in the code section,
     * and it has to be loaded at runtime.
     *
     * This trick hides the assignment from the compiler. Since it cannot
     * track where the pointer comes from, it will only use one relocation
     * where __arch_get_vdso_data() is called, and then keep the result in
     * a register.
     */
    asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));

    return ret;
}
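
/*
 * For illustration only (not part of this header): the naive version the
 * comment above argues against would be simply
 *
 *     static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
 *     {
 *         return _vdso_data;
 *     }
 *
 * which lets the compiler rematerialise &_vdso_data through a fresh
 * relocation at every use site instead of keeping it in a register.
 */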

#ifdef CONFIG_TIME_NS
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
    const struct vdso_data *ret;

    /* See __arch_get_vdso_data(). */
    asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));

    return ret;
}
#endif
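
/*
 * Override the generic check, which accepts any mode other than
 * VDSO_CLOCKMODE_NONE: the architected timer is the only clocksource
 * this compat vDSO can read from userspace, so everything else must
 * take the syscall fallback.
 */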

static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
    return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
}
#define vdso_clocksource_ok vdso_clocksource_ok

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
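
/*
 * Illustrative sketch (not part of this header, names simplified): roughly
 * how the generic vDSO library (lib/vdso/gettimeofday.c) ties these hooks
 * together for a high-resolution clock read.
 *
 *     do {
 *         seq    = vdso_read_begin(vd);       // seqcount snapshot
 *         cycles = __arch_get_hw_counter(vd->clock_mode, vd);
 *         ns     = vdso_ts->nsec;
 *         ns    += ((cycles - vd->cycle_last) & vd->mask) * vd->mult;
 *         ns   >>= vd->shift;
 *     } while (unlikely(vdso_read_retry(vd, seq))); // raced with an update?
 *
 * If the clocksource cannot be used (vdso_clocksource_ok() fails, or
 * __arch_get_hw_counter() observes a mode change), the caller ends up in
 * one of the *_fallback helpers above.
 */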