// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 ARM Ltd.
 *
 * Generic implementation of update_vsyscall and update_vsyscall_tz.
 *
 * Based on the x86 specific implementation.
 */

#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include "timekeeping_internal.h"

static inline void update_vdso_data(struct vdso_data *vdata,
				    struct timekeeper *tk)
{
	struct vdso_timestamp *vdso_ts;
	u64 nsec, sec;

	vdata[CS_HRES_COARSE].cycle_last	= tk->tkr_mono.cycle_last;
	vdata[CS_HRES_COARSE].mask		= tk->tkr_mono.mask;
	vdata[CS_HRES_COARSE].mult		= tk->tkr_mono.mult;
	vdata[CS_HRES_COARSE].shift		= tk->tkr_mono.shift;
	vdata[CS_RAW].cycle_last		= tk->tkr_raw.cycle_last;
	vdata[CS_RAW].mask			= tk->tkr_raw.mask;
	vdata[CS_RAW].mult			= tk->tkr_raw.mult;
	vdata[CS_RAW].shift			= tk->tkr_raw.shift;

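	/*
	 * Note: tk->tkr_mono.xtime_nsec is kept as shifted nanoseconds
	 * (scaled by 2^tk->tkr_mono.shift), so the values accumulated below
	 * are shifted up to the same scale and one second is
	 * (NSEC_PER_SEC << shift). The vDSO reader applies the final
	 * ">> shift" when it turns the stored base time plus the clocksource
	 * delta into a timespec.
	 */
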
	/* CLOCK_MONOTONIC */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;

	nsec = tk->tkr_mono.xtime_nsec;
	nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
		vdso_ts->sec++;
	}
	vdso_ts->nsec	= nsec;

	/* Copy MONOTONIC time for BOOTTIME */
	sec	= vdso_ts->sec;
	/* Add the boot offset */
	sec	+= tk->monotonic_to_boot.tv_sec;
	nsec	+= (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;

	/* CLOCK_BOOTTIME */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
	vdso_ts->sec	= sec;

	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
		vdso_ts->sec++;
	}
	vdso_ts->nsec	= nsec;

	/* CLOCK_MONOTONIC_RAW */
	vdso_ts		= &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
	vdso_ts->sec	= tk->raw_sec;
	vdso_ts->nsec	= tk->tkr_raw.xtime_nsec;

	/* CLOCK_TAI */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
	vdso_ts->sec	= tk->xtime_sec + (s64)tk->tai_offset;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
}

void update_vsyscall(struct timekeeper *tk)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	struct vdso_timestamp *vdso_ts;
	s32 clock_mode;
	u64 nsec;

	/* copy vsyscall data */
	vdso_write_begin(vdata);

	clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
	vdata[CS_HRES_COARSE].clock_mode	= clock_mode;
	vdata[CS_RAW].clock_mode		= clock_mode;

	/* CLOCK_REALTIME also required for time() */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
	vdso_ts->sec	= tk->xtime_sec;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;

	/* CLOCK_REALTIME_COARSE */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
	vdso_ts->sec	= tk->xtime_sec;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

	/* CLOCK_MONOTONIC_COARSE */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
	nsec		= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
	nsec		= nsec + tk->wall_to_monotonic.tv_nsec;
	vdso_ts->sec	+= __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);

	/*
	 * Read without the seqlock held by clock_getres().
	 * Note: No need to have a second copy.
	 */
	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);

	/*
	 * If the current clocksource is not VDSO capable, then spare the
	 * update of the high resolution parts.
	 */
	if (clock_mode != VDSO_CLOCKMODE_NONE)
		update_vdso_data(vdata, tk);

	__arch_update_vsyscall(vdata, tk);

	vdso_write_end(vdata);

	__arch_sync_vdso_data(vdata);
}
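
/*
 * A rough sketch of the reader side that the vdso_write_begin()/
 * vdso_write_end() calls above serialize against. The real generic reader
 * lives in lib/vdso/gettimeofday.c; this only illustrates the sequence-count
 * protocol using the helpers from <vdso/helpers.h>:
 *
 *	do {
 *		seq = vdso_read_begin(vd);
 *		cycles = ...;			// read the clocksource
 *		ns = vdso_ts->nsec;
 *		ns += ((cycles - vd->cycle_last) & vd->mask) * vd->mult;
 *		ns >>= vd->shift;		// undo the shifted-nsec scaling
 *	} while (unlikely(vdso_read_retry(vd, seq)));
 */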

void update_vsyscall_tz(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();

	vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
	vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;

	__arch_sync_vdso_data(vdata);
}

/**
 * vdso_update_begin - Start of a VDSO update section
 *
 * Allows architecture code to safely update the architecture specific VDSO
 * data. Disables interrupts, acquires timekeeper lock to serialize against
 * concurrent updates from timekeeping and invalidates the VDSO data
 * sequence counter to prevent concurrent readers from accessing
 * inconsistent data.
 *
 * Returns: Saved interrupt flags which need to be handed in to
 * vdso_update_end().
 */
unsigned long vdso_update_begin(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	vdso_write_begin(vdata);
	return flags;
}

/**
 * vdso_update_end - End of a VDSO update section
 * @flags:	Interrupt flags as returned from vdso_update_begin()
 *
 * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data
 * synchronization if the architecture requires it, drops timekeeper lock
 * and restores interrupt flags.
 */
void vdso_update_end(unsigned long flags)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();

	vdso_write_end(vdata);
	__arch_sync_vdso_data(vdata);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
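
/*
 * A minimal usage sketch for the two helpers above, assuming an architecture
 * that keeps an extra, architecture-specific field in its vDSO data page
 * (the field name "arch_data" below is hypothetical):
 *
 *	unsigned long flags;
 *
 *	flags = vdso_update_begin();
 *	__arch_get_k_vdso_data()->arch_data = ...;	// hypothetical field
 *	vdso_update_end(flags);
 *
 * Between the two calls interrupts are disabled, timekeeper_lock is held and
 * the vdso_data sequence count marks an update in progress, so concurrent
 * readers retry instead of observing a half-written update.
 */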