/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/arch_timer.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#ifndef __ASM_ARCH_TIMER_H
#define __ASM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

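/*
 * Erratum handling for CPUs with a broken architected timer. With the
 * out-of-line workaround support built in, a per-CPU pointer
 * (timer_unstable_counter_workaround) may name a workaround descriptor;
 * erratum_handler(h) then yields the descriptor's 'h' callback when one
 * is present, and the default arch_timer_##h accessor otherwise.
 * Without the config option, both macros collapse to the defaults at
 * build time.
 */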
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
#define has_erratum_handler(h)                      \
    ({                              \
        const struct arch_timer_erratum_workaround *__wa;   \
        __wa = __this_cpu_read(timer_unstable_counter_workaround); \
        (__wa && __wa->h);                  \
    })

#define erratum_handler(h)                      \
    ({                              \
        const struct arch_timer_erratum_workaround *__wa;   \
        __wa = __this_cpu_read(timer_unstable_counter_workaround); \
        (__wa && __wa->h) ? ({ isb(); __wa->h; }) : arch_timer_##h; \
    })

#else
#define has_erratum_handler(h)             false
#define erratum_handler(h)             (arch_timer_##h)
#endif

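/*
 * How a workaround entry is matched against the running system: by a
 * device-tree property, by a local CPU capability ID, or by ACPI OEM
 * table information.
 */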
enum arch_timer_erratum_match_type {
    ate_match_dt,
    ate_match_local_cap_id,
    ate_match_acpi_oem_info,
};

struct clock_event_device;

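/*
 * One descriptor per erratum: 'id' is interpreted according to
 * match_type, and each non-NULL callback overrides the corresponding
 * default counter-read or timer-programming path. disable_compat_vdso
 * marks errata that the 32-bit (compat) vDSO cannot work around, so
 * direct counter access from compat userspace is disabled instead.
 */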
struct arch_timer_erratum_workaround {
    enum arch_timer_erratum_match_type match_type;
    const void *id;
    const char *desc;
    u64 (*read_cntpct_el0)(void);
    u64 (*read_cntvct_el0)(void);
    int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
    int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
    bool disable_compat_vdso;
};

DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
        timer_unstable_counter_workaround);

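/*
 * Raw counter reads. On CPUs without FEAT_ECV the read is preceded by
 * an ISB so the counter cannot be sampled speculatively; with ECV the
 * ALTERNATIVE patches in the self-synchronizing CNTPCTSS_EL0 /
 * CNTVCTSS_EL0 registers, which make the barrier unnecessary (the isb
 * is replaced with a nop). notrace keeps ftrace instrumentation out of
 * these hot timekeeping helpers.
 */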
static inline notrace u64 arch_timer_read_cntpct_el0(void)
{
    u64 cnt;

    asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
                 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
                 ARM64_HAS_ECV)
             : "=r" (cnt));

    return cnt;
}

static inline notrace u64 arch_timer_read_cntvct_el0(void)
{
    u64 cnt;

    asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
                 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
                 ARM64_HAS_ECV)
             : "=r" (cnt));

    return cnt;
}

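/*
 * arch_timer_reg_read_stable(reg) expands to a preemption-safe read:
 * preemption is disabled so that erratum_handler() resolves the
 * workaround of the CPU that actually performs the read. For example,
 * arch_timer_reg_read_stable(cntvct_el0) calls either the per-CPU
 * read_cntvct_el0 workaround or arch_timer_read_cntvct_el0 above.
 */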
#define arch_timer_reg_read_stable(reg)                 \
    ({                              \
        u64 _val;                       \
                                    \
        preempt_disable_notrace();              \
        _val = erratum_handler(read_ ## reg)();         \
        preempt_enable_notrace();               \
                                    \
        _val;                           \
    })

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
    if (access == ARCH_TIMER_PHYS_ACCESS) {
        switch (reg) {
        case ARCH_TIMER_REG_CTRL:
            write_sysreg(val, cntp_ctl_el0);
            isb();
            break;
        case ARCH_TIMER_REG_CVAL:
            write_sysreg(val, cntp_cval_el0);
            break;
        default:
            BUILD_BUG();
        }
    } else if (access == ARCH_TIMER_VIRT_ACCESS) {
        switch (reg) {
        case ARCH_TIMER_REG_CTRL:
            write_sysreg(val, cntv_ctl_el0);
            isb();
            break;
        case ARCH_TIMER_REG_CVAL:
            write_sysreg(val, cntv_cval_el0);
            break;
        default:
            BUILD_BUG();
        }
    } else {
        BUILD_BUG();
    }
}

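/*
 * The read side only exposes the control registers; any other
 * (access, reg) pair is rejected at compile time by BUILD_BUG().
 */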
static __always_inline
u64 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
    if (access == ARCH_TIMER_PHYS_ACCESS) {
        switch (reg) {
        case ARCH_TIMER_REG_CTRL:
            return read_sysreg(cntp_ctl_el0);
        default:
            BUILD_BUG();
        }
    } else if (access == ARCH_TIMER_VIRT_ACCESS) {
        switch (reg) {
        case ARCH_TIMER_REG_CTRL:
            return read_sysreg(cntv_ctl_el0);
        default:
            BUILD_BUG();
        }
    }

    BUILD_BUG();
    unreachable();
}

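/*
 * CNTFRQ_EL0 holds the counter frequency as programmed by firmware at
 * boot. CNTKCTL_EL1 gates EL0 access to the counters and the event
 * stream; the ISB after writing it ensures the new configuration is
 * visible to subsequent instructions.
 */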
static inline u32 arch_timer_get_cntfrq(void)
{
    return read_sysreg(cntfrq_el0);
}

static inline u32 arch_timer_get_cntkctl(void)
{
    return read_sysreg(cntkctl_el1);
}

static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
    write_sysreg(cntkctl, cntkctl_el1);
    isb();
}

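/*
 * Counter accessors come in two flavours: the _stable variants go
 * through arch_timer_reg_read_stable() and so honour any per-CPU
 * erratum workaround, while the plain variants read the register
 * directly. Both then apply arch_counter_enforce_ordering() (from
 * <asm/barrier.h>), which fabricates an address dependency on the
 * counter value so that subsequent memory accesses cannot be
 * speculated ahead of the counter read.
 */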
static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
    u64 cnt;

    cnt = arch_timer_reg_read_stable(cntpct_el0);
    arch_counter_enforce_ordering(cnt);
    return cnt;
}

static __always_inline u64 __arch_counter_get_cntpct(void)
{
    u64 cnt;

    asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
                 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
                 ARM64_HAS_ECV)
             : "=r" (cnt));
    arch_counter_enforce_ordering(cnt);
    return cnt;
}

static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
    u64 cnt;

    cnt = arch_timer_reg_read_stable(cntvct_el0);
    arch_counter_enforce_ordering(cnt);
    return cnt;
}

static __always_inline u64 __arch_counter_get_cntvct(void)
{
    u64 cnt;

    asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
                 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
                 ARM64_HAS_ECV)
             : "=r" (cnt));
    arch_counter_enforce_ordering(cnt);
    return cnt;
}

static inline int arch_timer_arch_init(void)
{
    return 0;
}

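/*
 * Advertise the timer event stream to userspace via the EVTSTRM hwcap,
 * plus the 32-bit equivalent when compat tasks are supported.
 */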
static inline void arch_timer_set_evtstrm_feature(void)
{
    cpu_set_named_feature(EVTSTRM);
#ifdef CONFIG_COMPAT
    compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

static inline bool arch_timer_have_evtstrm_feature(void)
{
    return cpu_have_named_feature(EVTSTRM);
}
#endif