// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * paravirtual clock -- common code used by kvm/xen
 */

#include <linux/clocksource.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/nmi.h>

#include <asm/fixmap.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>

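/*
 * Mask of pvclock flag bits that the platform setup code (kvmclock/Xen)
 * has declared valid via pvclock_set_flags(); flags read from the shared
 * time info are filtered through this mask.
 */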
static u8 valid_flags __read_mostly = 0;
static struct pvclock_vsyscall_time_info *pvti_cpu0_va __read_mostly;

void pvclock_set_flags(u8 flags)
{
	valid_flags = flags;
}

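/*
 * The pvclock ABI converts a TSC delta to nanoseconds as
 *
 *	ns = ((delta << tsc_shift) * tsc_to_system_mul) >> 32
 *
 * (a right shift when tsc_shift is negative).  Inverting that relation
 * gives the guest's TSC frequency in kHz:
 *
 *	tsc_khz = ((10^6 << 32) / tsc_to_system_mul) >> tsc_shift
 *
 * which is what the function below computes.
 */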
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
{
	u64 pv_tsc_khz = 1000000ULL << 32;

	do_div(pv_tsc_khz, src->tsc_to_system_mul);
	if (src->tsc_shift < 0)
		pv_tsc_khz <<= -src->tsc_shift;
	else
		pv_tsc_khz >>= src->tsc_shift;
	return pv_tsc_khz;
}

void pvclock_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	reset_hung_task_detector();
}

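/*
 * Highest system time value handed out so far, used to keep reads monotonic
 * across vCPUs when the hypervisor does not advertise a stable TSC.  Reset
 * on resume, when the underlying clock may have been reinitialized.
 */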
static atomic64_t last_value = ATOMIC64_INIT(0);

void pvclock_resume(void)
{
	atomic64_set(&last_value, 0);
}

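/*
 * The shared time info is updated by the hypervisor with a seqcount-style
 * protocol: the version field is odd while an update is in progress.
 * pvclock_read_begin()/pvclock_read_retry() are used to loop until a
 * consistent snapshot has been read.
 */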
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
{
	unsigned version;
	u8 flags;

	do {
		version = pvclock_read_begin(src);
		flags = src->flags;
	} while (pvclock_read_retry(src, version));

	return flags & valid_flags;
}

u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
	unsigned version;
	u64 ret;
	u64 last;
	u8 flags;

	do {
		version = pvclock_read_begin(src);
		ret = __pvclock_read_cycles(src, rdtsc_ordered());
		flags = src->flags;
	} while (pvclock_read_retry(src, version));

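	/*
	 * The host sets PVCLOCK_GUEST_STOPPED when the VM has been stopped
	 * for a while (e.g. paused or live-migrated).  Clear the flag and
	 * poke the watchdogs so the lost time is not misreported as a soft
	 * lockup, RCU stall or hung task.
	 */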
	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
		src->flags &= ~PVCLOCK_GUEST_STOPPED;
		pvclock_touch_watchdogs();
	}

	if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
	    (flags & PVCLOCK_TSC_STABLE_BIT))
		return ret;

	/*
	 * Assumption here is that last_value, a global accumulator, always
	 * goes forward.  If we are less than that, we should not be much
	 * smaller.  We assume there is an error margin we're inside, and
	 * then the correction does not sacrifice accuracy.
	 *
	 * For reads: the global value may have changed between the test and
	 * the return, but that only means someone else updated the clock at
	 * a later time.  We just need to make sure we never see a backwards
	 * event.
	 *
	 * For updates: last_value = ret is not enough, since two vcpus could
	 * be updating at the same time, and we don't know who won the race,
	 * or whether the last update is the actual one.
	 */
	last = atomic64_read(&last_value);
	do {
		if (ret < last)
			return last;
		last = atomic64_cmpxchg(&last_value, last, ret);
	} while (unlikely(last != ret));

	return ret;
}

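/*
 * The shared wall clock structure holds the wall-clock time at system boot;
 * adding the elapsed system time from the vcpu time info yields the current
 * wall-clock time.
 */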
void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
			    struct pvclock_vcpu_time_info *vcpu_time,
			    struct timespec64 *ts)
{
	u32 version;
	u64 delta;
	struct timespec64 now;

	/* get wallclock at system boot */
	do {
		version = wall_clock->version;
		rmb();		/* fetch version before reading the time */
		/*
		 * Note: wall_clock->sec is a u32, so it can only represent
		 * dates between 1970 and 2106.  Going beyond that requires
		 * an extended wall clock structure in the hypervisor ABI.
		 */
		now.tv_sec = wall_clock->sec;
		now.tv_nsec = wall_clock->nsec;
		rmb();		/* fetch time before re-checking version */
	} while ((wall_clock->version & 1) || (version != wall_clock->version));

	delta = pvclock_clocksource_read(vcpu_time);
	delta += now.tv_sec * NSEC_PER_SEC + now.tv_nsec;

	now.tv_nsec = do_div(delta, NSEC_PER_SEC);
	now.tv_sec = delta;

	set_normalized_timespec64(ts, now.tv_sec, now.tv_nsec);
}

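/*
 * Record the pvclock time info page for vCPU 0 so it can be mapped into the
 * vDSO for userspace clock reads.  This has to happen before the pvclock
 * vDSO clock mode is ever used, hence the WARN_ON() below.
 */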
void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
{
	WARN_ON(vclock_was_used(VDSO_CLOCKMODE_PVCLOCK));
	pvti_cpu0_va = pvti;
}

struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
{
	return pvti_cpu0_va;
}
EXPORT_SYMBOL_GPL(pvclock_get_pvti_cpu0_va);