Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * NTP state machine interfaces and logic.
0004  *
0005  * This code was mainly moved from kernel/timer.c and kernel/time.c
0006  * Please see those files for relevant copyright info and historical
0007  * changelogs.
0008  */
0009 #include <linux/capability.h>
0010 #include <linux/clocksource.h>
0011 #include <linux/workqueue.h>
0012 #include <linux/hrtimer.h>
0013 #include <linux/jiffies.h>
0014 #include <linux/math64.h>
0015 #include <linux/timex.h>
0016 #include <linux/time.h>
0017 #include <linux/mm.h>
0018 #include <linux/module.h>
0019 #include <linux/rtc.h>
0020 #include <linux/audit.h>
0021 
0022 #include "ntp_internal.h"
0023 #include "timekeeping_internal.h"
0024 
0025 
/*
 * NTP timekeeping variables:
 *
 * Note: All of the NTP state is protected by the timekeeping locks.
 */


/* USER_HZ period (usecs): */
unsigned long           tick_usec = USER_TICK_USEC;

/* SHIFTED_HZ period (nsecs): */
unsigned long           tick_nsec;

/* Current tick length, NTP-scaled (ns << NTP_SCALE_SHIFT per interval): */
static u64          tick_length;
/* Baseline tick length, recomputed by ntp_update_frequency(): */
static u64          tick_length_base;

#define SECS_PER_DAY        86400
#define MAX_TICKADJ     500LL       /* usecs */
/* Per-interval adjtime() slew limit, in the same scale as tick_length: */
#define MAX_TICKADJ_SCALED \
    (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
/* Largest TAI-UTC offset accepted via ADJ_TAI (see process_adjtimex_modes): */
#define MAX_TAI_OFFSET      100000
/*
 * phase-lock loop variables
 */

/*
 * clock synchronization status
 *
 * (TIME_ERROR prevents overwriting the CMOS clock)
 */
static int          time_state = TIME_OK;

/* clock status bits:                           */
static int          time_status = STA_UNSYNC;

/* time adjustment (nsecs):                     */
static s64          time_offset;

/* pll time constant:                           */
static long         time_constant = 2;

/* maximum error (usecs):                       */
static long         time_maxerror = NTP_PHASE_LIMIT;

/* estimated error (usecs):                     */
static long         time_esterror = NTP_PHASE_LIMIT;

/* frequency offset (scaled nsecs/secs):                */
static s64          time_freq;

/* time at last adjustment (secs):                  */
static time64_t     time_reftime;

/* remaining adjtime() adjustment (usecs), drained by second_overflow() */
static long         time_adjust;

/* constant (boot-param configurable) NTP tick adjustment (upscaled)    */
static s64          ntp_tick_adj;

/* second value of the next pending leapsecond, or TIME64_MAX if no leap */
static time64_t         ntp_next_leap_sec = TIME64_MAX;
#ifdef CONFIG_NTP_PPS

/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available. They establish the engineering parameters of the clock
 * discipline loop when controlled by the PPS signal.
 */
#define PPS_VALID   10  /* PPS signal watchdog max (s) */
#define PPS_POPCORN 4   /* popcorn spike threshold (shift) */
#define PPS_INTMIN  2   /* min freq interval (s) (shift) */
#define PPS_INTMAX  8   /* max freq interval (s) (shift) */
#define PPS_INTCOUNT    4   /* number of consecutive good intervals to
                   increase pps_shift or consecutive bad
                   intervals to decrease it */
#define PPS_MAXWANDER   100000  /* max PPS freq wander (ns/s) */

static int pps_valid;       /* signal watchdog counter */
static long pps_tf[3];      /* phase median filter: [0] newest sample */
static long pps_jitter;     /* current jitter (ns) */
static struct timespec64 pps_fbase; /* beginning of the last freq interval */
static int pps_shift;       /* current interval duration (s) (shift) */
static int pps_intcnt;      /* interval counter */
static s64 pps_freq;        /* frequency offset (scaled ns/s) */
static long pps_stabil;     /* current stability (scaled ns/s) */

/*
 * PPS signal quality monitors (reported to userspace via pps_fill_timex())
 */
static long pps_calcnt;     /* calibration intervals */
static long pps_jitcnt;     /* jitter limit exceeded */
static long pps_stbcnt;     /* stability limit exceeded */
static long pps_errcnt;     /* calibration errors */
0121 
0122 /* PPS kernel consumer compensates the whole phase error immediately.
0123  * Otherwise, reduce the offset by a fixed factor times the time constant.
0124  */
0125 static inline s64 ntp_offset_chunk(s64 offset)
0126 {
0127     if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
0128         return offset;
0129     else
0130         return shift_right(offset, SHIFT_PLL + time_constant);
0131 }
0132 
0133 static inline void pps_reset_freq_interval(void)
0134 {
0135     /* the PPS calibration interval may end
0136        surprisingly early */
0137     pps_shift = PPS_INTMIN;
0138     pps_intcnt = 0;
0139 }
0140 
0141 /**
0142  * pps_clear - Clears the PPS state variables
0143  */
0144 static inline void pps_clear(void)
0145 {
0146     pps_reset_freq_interval();
0147     pps_tf[0] = 0;
0148     pps_tf[1] = 0;
0149     pps_tf[2] = 0;
0150     pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
0151     pps_freq = 0;
0152 }
0153 
0154 /* Decrease pps_valid to indicate that another second has passed since
0155  * the last PPS signal. When it reaches 0, indicate that PPS signal is
0156  * missing.
0157  */
0158 static inline void pps_dec_valid(void)
0159 {
0160     if (pps_valid > 0)
0161         pps_valid--;
0162     else {
0163         time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
0164                  STA_PPSWANDER | STA_PPSERROR);
0165         pps_clear();
0166     }
0167 }
0168 
/* Seed the PPS frequency estimate from the NTP frequency (ADJ_FREQUENCY). */
static inline void pps_set_freq(s64 freq)
{
    pps_freq = freq;
}
0173 
0174 static inline int is_error_status(int status)
0175 {
0176     return (status & (STA_UNSYNC|STA_CLOCKERR))
0177         /* PPS signal lost when either PPS time or
0178          * PPS frequency synchronization requested
0179          */
0180         || ((status & (STA_PPSFREQ|STA_PPSTIME))
0181             && !(status & STA_PPSSIGNAL))
0182         /* PPS jitter exceeded when
0183          * PPS time synchronization requested */
0184         || ((status & (STA_PPSTIME|STA_PPSJITTER))
0185             == (STA_PPSTIME|STA_PPSJITTER))
0186         /* PPS wander exceeded or calibration error when
0187          * PPS frequency synchronization requested
0188          */
0189         || ((status & STA_PPSFREQ)
0190             && (status & (STA_PPSWANDER|STA_PPSERROR)));
0191 }
0192 
0193 static inline void pps_fill_timex(struct __kernel_timex *txc)
0194 {
0195     txc->ppsfreq       = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
0196                      PPM_SCALE_INV, NTP_SCALE_SHIFT);
0197     txc->jitter    = pps_jitter;
0198     if (!(time_status & STA_NANO))
0199         txc->jitter = pps_jitter / NSEC_PER_USEC;
0200     txc->shift     = pps_shift;
0201     txc->stabil    = pps_stabil;
0202     txc->jitcnt    = pps_jitcnt;
0203     txc->calcnt    = pps_calcnt;
0204     txc->errcnt    = pps_errcnt;
0205     txc->stbcnt    = pps_stbcnt;
0206 }
0207 
#else /* !CONFIG_NTP_PPS */

/* Without PPS support, always apply only the PLL-scaled offset fraction. */
static inline s64 ntp_offset_chunk(s64 offset)
{
    return shift_right(offset, SHIFT_PLL + time_constant);
}

/* No-op stand-ins so callers need no #ifdefs: */
static inline void pps_reset_freq_interval(void) {}
static inline void pps_clear(void) {}
static inline void pps_dec_valid(void) {}
static inline void pps_set_freq(s64 freq) {}

/* Without PPS, only clock-level error conditions can be reported. */
static inline int is_error_status(int status)
{
    return status & (STA_UNSYNC|STA_CLOCKERR);
}

static inline void pps_fill_timex(struct __kernel_timex *txc)
{
    /* PPS is not implemented, so these are zero */
    txc->ppsfreq       = 0;
    txc->jitter    = 0;
    txc->shift     = 0;
    txc->stabil    = 0;
    txc->jitcnt    = 0;
    txc->calcnt    = 0;
    txc->errcnt    = 0;
    txc->stbcnt    = 0;
}

#endif /* CONFIG_NTP_PPS */
0239 
0240 
0241 /**
0242  * ntp_synced - Returns 1 if the NTP status is not UNSYNC
0243  *
0244  */
0245 static inline int ntp_synced(void)
0246 {
0247     return !(time_status & STA_UNSYNC);
0248 }
0249 
0250 
0251 /*
0252  * NTP methods:
0253  */
0254 
0255 /*
0256  * Update (tick_length, tick_length_base, tick_nsec), based
0257  * on (tick_usec, ntp_tick_adj, time_freq):
0258  */
0259 static void ntp_update_frequency(void)
0260 {
0261     u64 second_length;
0262     u64 new_base;
0263 
0264     second_length        = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
0265                         << NTP_SCALE_SHIFT;
0266 
0267     second_length       += ntp_tick_adj;
0268     second_length       += time_freq;
0269 
0270     tick_nsec        = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
0271     new_base         = div_u64(second_length, NTP_INTERVAL_FREQ);
0272 
0273     /*
0274      * Don't wait for the next second_overflow, apply
0275      * the change to the tick length immediately:
0276      */
0277     tick_length     += new_base - tick_length_base;
0278     tick_length_base     = new_base;
0279 }
0280 
0281 static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
0282 {
0283     time_status &= ~STA_MODE;
0284 
0285     if (secs < MINSEC)
0286         return 0;
0287 
0288     if (!(time_status & STA_FLL) && (secs <= MAXSEC))
0289         return 0;
0290 
0291     time_status |= STA_MODE;
0292 
0293     return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
0294 }
0295 
/*
 * ntp_update_offset - feed a new phase sample into the PLL/FLL
 * @offset: phase error from adjtimex (usecs, or nsecs with STA_NANO)
 *
 * Updates time_freq (frequency feedback) and time_offset (phase error
 * still to be amortized by second_overflow()). No-op unless STA_PLL.
 */
static void ntp_update_offset(long offset)
{
    s64 freq_adj;
    s64 offset64;
    long secs;

    if (!(time_status & STA_PLL))
        return;

    if (!(time_status & STA_NANO)) {
        /* Make sure the multiplication below won't overflow */
        offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
        offset *= NSEC_PER_USEC;
    }

    /*
     * Scale the phase adjustment and
     * clamp to the operating range.
     */
    offset = clamp(offset, -MAXPHASE, MAXPHASE);

    /*
     * Select how the frequency is to be controlled
     * and in which mode (PLL or FLL).
     */
    secs = (long)(__ktime_get_real_seconds() - time_reftime);
    if (unlikely(time_status & STA_FREQHOLD))
        secs = 0;   /* freeze frequency feedback */

    time_reftime = __ktime_get_real_seconds();

    offset64    = offset;
    freq_adj    = ntp_update_offset_fll(offset64, secs);

    /*
     * Clamp update interval to reduce PLL gain with low
     * sampling rate (e.g. intermittent network connection)
     * to avoid instability.
     */
    if (unlikely(secs > 1 << (SHIFT_PLL + 1 + time_constant)))
        secs = 1 << (SHIFT_PLL + 1 + time_constant);

    /* PLL term: proportional to offset and elapsed time */
    freq_adj    += (offset64 * secs) <<
            (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));

    /* Clamp the combined frequency into [-MAXFREQ_SCALED, MAXFREQ_SCALED] */
    freq_adj    = min(freq_adj + time_freq, MAXFREQ_SCALED);

    time_freq   = max(freq_adj, -MAXFREQ_SCALED);

    /* Remaining phase error, per NTP interval, in scaled nsecs */
    time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}
0347 
/**
 * ntp_clear - Clears the NTP state variables
 *
 * Marks the clock unsynchronized, cancels any pending adjtime() slew,
 * re-derives the tick length and resets the leap second and PPS state.
 */
void ntp_clear(void)
{
    time_adjust = 0;        /* stop active adjtime() */
    time_status |= STA_UNSYNC;
    time_maxerror   = NTP_PHASE_LIMIT;
    time_esterror   = NTP_PHASE_LIMIT;

    /* Recompute tick_length_base from the (now reset) inputs */
    ntp_update_frequency();

    tick_length = tick_length_base;
    time_offset = 0;

    ntp_next_leap_sec = TIME64_MAX;
    /* Clear PPS state variables */
    pps_clear();
}
0367 
0368 
/*
 * ntp_tick_length - current tick length in NTP-scaled nanoseconds,
 * as maintained by ntp_update_frequency() and second_overflow().
 */
u64 ntp_tick_length(void)
{
    return tick_length;
}
0373 
0374 /**
0375  * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
0376  *
0377  * Provides the time of the next leapsecond against CLOCK_REALTIME in
0378  * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending.
0379  */
0380 ktime_t ntp_get_next_leap(void)
0381 {
0382     ktime_t ret;
0383 
0384     if ((time_state == TIME_INS) && (time_status & STA_INS))
0385         return ktime_set(ntp_next_leap_sec, 0);
0386     ret = KTIME_MAX;
0387     return ret;
0388 }
0389 
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 * Also handles leap second processing, and returns leap offset
 * (-1 to insert a second, +1 to delete one, 0 otherwise).
 */
int second_overflow(time64_t secs)
{
    s64 delta;
    int leap = 0;
    s32 rem;

    /*
     * Leap second processing. If in leap-insert state at the end of the
     * day, the system clock is set back one second; if in leap-delete
     * state, the system clock is set ahead one second.
     */
    switch (time_state) {
    case TIME_OK:
        if (time_status & STA_INS) {
            time_state = TIME_INS;
            /* Arm the leap for the next UTC midnight */
            div_s64_rem(secs, SECS_PER_DAY, &rem);
            ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
        } else if (time_status & STA_DEL) {
            time_state = TIME_DEL;
            div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
            ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
        }
        break;
    case TIME_INS:
        if (!(time_status & STA_INS)) {
            /* Insertion request withdrawn: disarm */
            ntp_next_leap_sec = TIME64_MAX;
            time_state = TIME_OK;
        } else if (secs == ntp_next_leap_sec) {
            leap = -1;
            time_state = TIME_OOP;
            printk(KERN_NOTICE
                "Clock: inserting leap second 23:59:60 UTC\n");
        }
        break;
    case TIME_DEL:
        if (!(time_status & STA_DEL)) {
            /* Deletion request withdrawn: disarm */
            ntp_next_leap_sec = TIME64_MAX;
            time_state = TIME_OK;
        } else if (secs == ntp_next_leap_sec) {
            leap = 1;
            ntp_next_leap_sec = TIME64_MAX;
            time_state = TIME_WAIT;
            printk(KERN_NOTICE
                "Clock: deleting leap second 23:59:59 UTC\n");
        }
        break;
    case TIME_OOP:
        /* Inserted second has elapsed; wait for status bits to clear */
        ntp_next_leap_sec = TIME64_MAX;
        time_state = TIME_WAIT;
        break;
    case TIME_WAIT:
        if (!(time_status & (STA_INS | STA_DEL)))
            time_state = TIME_OK;
        break;
    }


    /* Bump the maxerror field */
    time_maxerror += MAXFREQ / NSEC_PER_USEC;
    if (time_maxerror > NTP_PHASE_LIMIT) {
        time_maxerror = NTP_PHASE_LIMIT;
        time_status |= STA_UNSYNC;
    }

    /* Compute the phase adjustment for the next second */
    tick_length  = tick_length_base;

    delta        = ntp_offset_chunk(time_offset);
    time_offset -= delta;
    tick_length += delta;

    /* Check PPS signal */
    pps_dec_valid();

    /* Drain any pending adjtime() slew, MAX_TICKADJ usecs per second */
    if (!time_adjust)
        goto out;

    if (time_adjust > MAX_TICKADJ) {
        time_adjust -= MAX_TICKADJ;
        tick_length += MAX_TICKADJ_SCALED;
        goto out;
    }

    if (time_adjust < -MAX_TICKADJ) {
        time_adjust += MAX_TICKADJ;
        tick_length -= MAX_TICKADJ_SCALED;
        goto out;
    }

    /* Final, sub-limit remainder: apply it all and finish the slew */
    tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
                             << NTP_SCALE_SHIFT;
    time_adjust = 0;

out:
    return leap;
}
0496 
#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_hw_clock(struct work_struct *work);
static DECLARE_WORK(sync_work, sync_hw_clock);
static struct hrtimer sync_hrtimer;
/* Nominal RTC resynchronization period: ~11 minutes */
#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)

/*
 * Timer expiry: defer the actual RTC update to the workqueue —
 * presumably because the RTC update path cannot run in timer
 * context (NOTE(review): confirm against the RTC class API).
 */
static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
    queue_work(system_freezable_power_efficient_wq, &sync_work);

    return HRTIMER_NORESTART;
}
0509 
0510 static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
0511 {
0512     ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
0513 
0514     if (retry)
0515         exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
0516     else
0517         exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
0518 
0519     hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
0520 }
0521 
/*
 * Check whether @now is correct versus the required time to update the RTC
 * and calculate the value which needs to be written to the RTC so that the
 * next seconds increment of the RTC after the write is aligned with the next
 * seconds increment of clock REALTIME.
 *
 * tsched     t1 write(t2.tv_sec - 1sec))   t2 RTC increments seconds
 *
 * t2.tv_nsec == 0
 * tsched = t2 - set_offset_nsec
 * newval = t2 - NSEC_PER_SEC
 *
 * ==> newval = tsched + set_offset_nsec - NSEC_PER_SEC
 *
 * As the execution of this code is not guaranteed to happen exactly at
 * tsched this allows it to happen within a fuzzy region:
 *
 *  abs(now - tsched) < FUZZ
 *
 * If @now is not inside the allowed window the function returns false.
 */
static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
                  struct timespec64 *to_set,
                  const struct timespec64 *now)
{
    /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
    const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
    /* Equivalent to subtracting (1s - set_offset_nsec) from @now */
    struct timespec64 delay = {.tv_sec = -1,
                   .tv_nsec = set_offset_nsec};

    *to_set = timespec64_add(*now, delay);

    /* Just below a second boundary: round down to the boundary */
    if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
        to_set->tv_nsec = 0;
        return true;
    }

    /* Just above a second boundary: round up to the next boundary */
    if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
        to_set->tv_sec++;
        to_set->tv_nsec = 0;
        return true;
    }
    return false;
}
0566 
#ifdef CONFIG_GENERIC_CMOS_UPDATE
/* Weak default; architectures with a CMOS clock provide their own. */
int __weak update_persistent_clock64(struct timespec64 now64)
{
    return -ENODEV;
}
#else
/* No CMOS update support configured: always report "no device". */
static inline int update_persistent_clock64(struct timespec64 now64)
{
    return -ENODEV;
}
#endif
0578 
#ifdef CONFIG_RTC_SYSTOHC
/* Save NTP synchronized time to the RTC */
static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
    struct rtc_device *rtc;
    struct rtc_time tm;
    int err = -ENODEV;

    rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
    if (!rtc)
        return -ENODEV;

    if (!rtc->ops || !rtc->ops->set_time)
        goto out_close;

    /* First call might not have the correct offset */
    if (*offset_nsec == rtc->set_offset_nsec) {
        rtc_time64_to_tm(to_set->tv_sec, &tm);
        err = rtc_set_time(rtc, &tm);
    } else {
        /* Store the update offset and let the caller try again */
        *offset_nsec = rtc->set_offset_nsec;
        err = -EAGAIN;
    }
out_close:
    rtc_class_close(rtc);
    return err;
}
#else
/* No RTC class support configured: always report "no device". */
static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
    return -ENODEV;
}
#endif
0613 
/*
 * If we have an externally synchronized Linux clock, then update RTC clock
 * accordingly every ~11 minutes. Generally RTCs can only store second
 * precision, but many RTCs will adjust the phase of their second tick to
 * match the moment of update. This infrastructure arranges to call to the RTC
 * set at the correct moment to phase synchronize the RTC second tick over
 * with the kernel clock.
 */
static void sync_hw_clock(struct work_struct *work)
{
    /*
     * The default synchronization offset is 500ms for the deprecated
     * update_persistent_clock64() under the assumption that it uses
     * the infamous CMOS clock (MC146818).
     */
    static unsigned long offset_nsec = NSEC_PER_SEC / 2;
    struct timespec64 now, to_set;
    int res = -EAGAIN;

    /*
     * Don't update if STA_UNSYNC is set and if ntp_notify_cmos_timer()
     * managed to schedule the work between the timer firing and the
     * work being able to rearm the timer. Wait for the timer to expire.
     */
    if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
        return;

    ktime_get_real_ts64(&now);
    /* If @now is not in the allowed window, try again */
    if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
        goto rearm;

    /* Take timezone adjusted RTCs into account */
    if (persistent_clock_is_local)
        to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);

    /* Try the legacy RTC first. */
    res = update_persistent_clock64(to_set);
    if (res != -ENODEV)
        goto rearm;

    /* Try the RTC class */
    res = update_rtc(&to_set, &offset_nsec);
    if (res == -ENODEV)
        return;
rearm:
    /* Nonzero res means a failed attempt: rearm in quick-retry mode */
    sched_sync_hw_clock(offset_nsec, res != 0);
}
0662 
0663 void ntp_notify_cmos_timer(void)
0664 {
0665     /*
0666      * When the work is currently executed but has not yet the timer
0667      * rearmed this queues the work immediately again. No big issue,
0668      * just a pointless work scheduled.
0669      */
0670     if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
0671         queue_work(system_freezable_power_efficient_wq, &sync_work);
0672 }
0673 
/* Boot-time setup of the RTC sync timer. */
static void __init ntp_init_cmos_sync(void)
{
    hrtimer_init(&sync_hrtimer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
    sync_hrtimer.function = sync_timer_callback;
}
#else /* CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) */
static inline void __init ntp_init_cmos_sync(void) { }
#endif /* !CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) */
0682 
/*
 * Propagate a new txc->status value into the NTP state:
 * handles the PLL on/off transitions, then copies the writable
 * status bits from userspace.
 */
static inline void process_adj_status(const struct __kernel_timex *txc)
{
    /* Turning PLL off: reset synchronization state */
    if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
        time_state = TIME_OK;
        time_status = STA_UNSYNC;
        ntp_next_leap_sec = TIME64_MAX;
        /* restart PPS frequency calibration */
        pps_reset_freq_interval();
    }

    /*
     * If we turn on PLL adjustments then reset the
     * reference time to current time.
     */
    if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
        time_reftime = __ktime_get_real_seconds();

    /* only set allowed bits */
    time_status &= STA_RONLY;
    time_status |= txc->status & ~STA_RONLY;
}
0707 
0708 
0709 static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
0710                       s32 *time_tai)
0711 {
0712     if (txc->modes & ADJ_STATUS)
0713         process_adj_status(txc);
0714 
0715     if (txc->modes & ADJ_NANO)
0716         time_status |= STA_NANO;
0717 
0718     if (txc->modes & ADJ_MICRO)
0719         time_status &= ~STA_NANO;
0720 
0721     if (txc->modes & ADJ_FREQUENCY) {
0722         time_freq = txc->freq * PPM_SCALE;
0723         time_freq = min(time_freq, MAXFREQ_SCALED);
0724         time_freq = max(time_freq, -MAXFREQ_SCALED);
0725         /* update pps_freq */
0726         pps_set_freq(time_freq);
0727     }
0728 
0729     if (txc->modes & ADJ_MAXERROR)
0730         time_maxerror = txc->maxerror;
0731 
0732     if (txc->modes & ADJ_ESTERROR)
0733         time_esterror = txc->esterror;
0734 
0735     if (txc->modes & ADJ_TIMECONST) {
0736         time_constant = txc->constant;
0737         if (!(time_status & STA_NANO))
0738             time_constant += 4;
0739         time_constant = min(time_constant, (long)MAXTC);
0740         time_constant = max(time_constant, 0l);
0741     }
0742 
0743     if (txc->modes & ADJ_TAI &&
0744             txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
0745         *time_tai = txc->constant;
0746 
0747     if (txc->modes & ADJ_OFFSET)
0748         ntp_update_offset(txc->offset);
0749 
0750     if (txc->modes & ADJ_TICK)
0751         tick_usec = txc->tick;
0752 
0753     if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
0754         ntp_update_frequency();
0755 }
0756 
0757 
0758 /*
0759  * adjtimex mainly allows reading (and writing, if superuser) of
0760  * kernel time-keeping variables. used by xntpd.
0761  */
0762 int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
0763           s32 *time_tai, struct audit_ntp_data *ad)
0764 {
0765     int result;
0766 
0767     if (txc->modes & ADJ_ADJTIME) {
0768         long save_adjust = time_adjust;
0769 
0770         if (!(txc->modes & ADJ_OFFSET_READONLY)) {
0771             /* adjtime() is independent from ntp_adjtime() */
0772             time_adjust = txc->offset;
0773             ntp_update_frequency();
0774 
0775             audit_ntp_set_old(ad, AUDIT_NTP_ADJUST, save_adjust);
0776             audit_ntp_set_new(ad, AUDIT_NTP_ADJUST, time_adjust);
0777         }
0778         txc->offset = save_adjust;
0779     } else {
0780         /* If there are input parameters, then process them: */
0781         if (txc->modes) {
0782             audit_ntp_set_old(ad, AUDIT_NTP_OFFSET, time_offset);
0783             audit_ntp_set_old(ad, AUDIT_NTP_FREQ,   time_freq);
0784             audit_ntp_set_old(ad, AUDIT_NTP_STATUS, time_status);
0785             audit_ntp_set_old(ad, AUDIT_NTP_TAI,    *time_tai);
0786             audit_ntp_set_old(ad, AUDIT_NTP_TICK,   tick_usec);
0787 
0788             process_adjtimex_modes(txc, time_tai);
0789 
0790             audit_ntp_set_new(ad, AUDIT_NTP_OFFSET, time_offset);
0791             audit_ntp_set_new(ad, AUDIT_NTP_FREQ,   time_freq);
0792             audit_ntp_set_new(ad, AUDIT_NTP_STATUS, time_status);
0793             audit_ntp_set_new(ad, AUDIT_NTP_TAI,    *time_tai);
0794             audit_ntp_set_new(ad, AUDIT_NTP_TICK,   tick_usec);
0795         }
0796 
0797         txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
0798                   NTP_SCALE_SHIFT);
0799         if (!(time_status & STA_NANO))
0800             txc->offset = (u32)txc->offset / NSEC_PER_USEC;
0801     }
0802 
0803     result = time_state;    /* mostly `TIME_OK' */
0804     /* check for errors */
0805     if (is_error_status(time_status))
0806         result = TIME_ERROR;
0807 
0808     txc->freq      = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
0809                      PPM_SCALE_INV, NTP_SCALE_SHIFT);
0810     txc->maxerror      = time_maxerror;
0811     txc->esterror      = time_esterror;
0812     txc->status    = time_status;
0813     txc->constant      = time_constant;
0814     txc->precision     = 1;
0815     txc->tolerance     = MAXFREQ_SCALED / PPM_SCALE;
0816     txc->tick      = tick_usec;
0817     txc->tai       = *time_tai;
0818 
0819     /* fill PPS status fields */
0820     pps_fill_timex(txc);
0821 
0822     txc->time.tv_sec = ts->tv_sec;
0823     txc->time.tv_usec = ts->tv_nsec;
0824     if (!(time_status & STA_NANO))
0825         txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;
0826 
0827     /* Handle leapsec adjustments */
0828     if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {
0829         if ((time_state == TIME_INS) && (time_status & STA_INS)) {
0830             result = TIME_OOP;
0831             txc->tai++;
0832             txc->time.tv_sec--;
0833         }
0834         if ((time_state == TIME_DEL) && (time_status & STA_DEL)) {
0835             result = TIME_WAIT;
0836             txc->tai--;
0837             txc->time.tv_sec++;
0838         }
0839         if ((time_state == TIME_OOP) &&
0840                     (ts->tv_sec == ntp_next_leap_sec)) {
0841             result = TIME_WAIT;
0842         }
0843     }
0844 
0845     return result;
0846 }
0847 
#ifdef  CONFIG_NTP_PPS

/* actually struct pps_normtime is good old struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
struct pps_normtime {
    s64     sec;    /* seconds */
    long        nsec;   /* nanoseconds */
};
0858 
0859 /* normalize the timestamp so that nsec is in the
0860    ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
0861 static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
0862 {
0863     struct pps_normtime norm = {
0864         .sec = ts.tv_sec,
0865         .nsec = ts.tv_nsec
0866     };
0867 
0868     if (norm.nsec > (NSEC_PER_SEC >> 1)) {
0869         norm.nsec -= NSEC_PER_SEC;
0870         norm.sec++;
0871     }
0872 
0873     return norm;
0874 }
0875 
0876 /* get current phase correction and jitter */
0877 static inline long pps_phase_filter_get(long *jitter)
0878 {
0879     *jitter = pps_tf[0] - pps_tf[1];
0880     if (*jitter < 0)
0881         *jitter = -*jitter;
0882 
0883     /* TODO: test various filters */
0884     return pps_tf[0];
0885 }
0886 
0887 /* add the sample to the phase filter */
0888 static inline void pps_phase_filter_add(long err)
0889 {
0890     pps_tf[2] = pps_tf[1];
0891     pps_tf[1] = pps_tf[0];
0892     pps_tf[0] = err;
0893 }
0894 
0895 /* decrease frequency calibration interval length.
0896  * It is halved after four consecutive unstable intervals.
0897  */
0898 static inline void pps_dec_freq_interval(void)
0899 {
0900     if (--pps_intcnt <= -PPS_INTCOUNT) {
0901         pps_intcnt = -PPS_INTCOUNT;
0902         if (pps_shift > PPS_INTMIN) {
0903             pps_shift--;
0904             pps_intcnt = 0;
0905         }
0906     }
0907 }
0908 
0909 /* increase frequency calibration interval length.
0910  * It is doubled after four consecutive stable intervals.
0911  */
0912 static inline void pps_inc_freq_interval(void)
0913 {
0914     if (++pps_intcnt >= PPS_INTCOUNT) {
0915         pps_intcnt = PPS_INTCOUNT;
0916         if (pps_shift < PPS_INTMAX) {
0917             pps_shift++;
0918             pps_intcnt = 0;
0919         }
0920     }
0921 }
0922 
/* update clock frequency based on MONOTONIC_RAW clock PPS signal
 * timestamps
 *
 * At the end of the calibration interval the difference between the
 * first and last MONOTONIC_RAW clock timestamps divided by the length
 * of the interval becomes the frequency update. If the interval was
 * too long, the data are discarded.
 * Returns the difference between old and new frequency values.
 */
static long hardpps_update_freq(struct pps_normtime freq_norm)
{
    long delta, delta_mod;
    s64 ftemp;

    /* check if the frequency interval was too long */
    if (freq_norm.sec > (2 << pps_shift)) {
        time_status |= STA_PPSERROR;
        pps_errcnt++;
        pps_dec_freq_interval();
        printk_deferred(KERN_ERR
            "hardpps: PPSERROR: interval too long - %lld s\n",
            freq_norm.sec);
        return 0;
    }

    /* here the raw frequency offset and wander (stability) is
     * calculated. If the wander is less than the wander threshold
     * the interval is increased; otherwise it is decreased.
     */
    ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
            freq_norm.sec);
    delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
    pps_freq = ftemp;
    if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
        printk_deferred(KERN_WARNING
                "hardpps: PPSWANDER: change=%ld\n", delta);
        time_status |= STA_PPSWANDER;
        pps_stbcnt++;
        pps_dec_freq_interval();
    } else {    /* good sample */
        pps_inc_freq_interval();
    }

    /* the stability metric is calculated as the average of recent
     * frequency changes, but is used only for performance
     * monitoring
     */
    delta_mod = delta;
    if (delta_mod < 0)
        delta_mod = -delta_mod;
    /* exponential moving average with weight 1/2^PPS_INTMIN */
    pps_stabil += (div_s64(((s64)delta_mod) <<
                (NTP_SCALE_SHIFT - SHIFT_USEC),
                NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;

    /* if enabled, the system clock frequency is updated */
    if ((time_status & STA_PPSFREQ) != 0 &&
        (time_status & STA_FREQHOLD) == 0) {
        time_freq = pps_freq;
        ntp_update_frequency();
    }

    return delta;
}
0986 
0987 /* correct REALTIME clock phase error against PPS signal */
0988 static void hardpps_update_phase(long error)
0989 {
0990     long correction = -error;
0991     long jitter;
0992 
0993     /* add the sample to the median filter */
0994     pps_phase_filter_add(correction);
0995     correction = pps_phase_filter_get(&jitter);
0996 
0997     /* Nominal jitter is due to PPS signal noise. If it exceeds the
0998      * threshold, the sample is discarded; otherwise, if so enabled,
0999      * the time offset is updated.
1000      */
1001     if (jitter > (pps_jitter << PPS_POPCORN)) {
1002         printk_deferred(KERN_WARNING
1003                 "hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
1004                 jitter, (pps_jitter << PPS_POPCORN));
1005         time_status |= STA_PPSJITTER;
1006         pps_jitcnt++;
1007     } else if (time_status & STA_PPSTIME) {
1008         /* correct the time using the phase offset */
1009         time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
1010                 NTP_INTERVAL_FREQ);
1011         /* cancel running adjtime() */
1012         time_adjust = 0;
1013     }
1014     /* update jitter */
1015     pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
1016 }
1017 
1018 /*
1019  * __hardpps() - discipline CPU clock oscillator to external PPS signal
1020  *
1021  * This routine is called at each PPS signal arrival in order to
1022  * discipline the CPU clock oscillator to the PPS signal. It takes two
1023  * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
1024  * is used to correct clock phase error and the latter is used to
1025  * correct the frequency.
1026  *
1027  * This code is based on David Mills's reference nanokernel
1028  * implementation. It was mostly rewritten but keeps the same idea.
1029  */
1030 void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
1031 {
1032     struct pps_normtime pts_norm, freq_norm;
1033 
1034     pts_norm = pps_normalize_ts(*phase_ts);
1035 
1036     /* clear the error bits, they will be set again if needed */
1037     time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1038 
1039     /* indicate signal presence */
1040     time_status |= STA_PPSSIGNAL;
1041     pps_valid = PPS_VALID;
1042 
1043     /* when called for the first time,
1044      * just start the frequency interval */
1045     if (unlikely(pps_fbase.tv_sec == 0)) {
1046         pps_fbase = *raw_ts;
1047         return;
1048     }
1049 
1050     /* ok, now we have a base for frequency calculation */
1051     freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));
1052 
1053     /* check that the signal is in the range
1054      * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
1055     if ((freq_norm.sec == 0) ||
1056             (freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
1057             (freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
1058         time_status |= STA_PPSJITTER;
1059         /* restart the frequency calibration interval */
1060         pps_fbase = *raw_ts;
1061         printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
1062         return;
1063     }
1064 
1065     /* signal is ok */
1066 
1067     /* check if the current frequency interval is finished */
1068     if (freq_norm.sec >= (1 << pps_shift)) {
1069         pps_calcnt++;
1070         /* restart the frequency calibration interval */
1071         pps_fbase = *raw_ts;
1072         hardpps_update_freq(freq_norm);
1073     }
1074 
1075     hardpps_update_phase(pts_norm.nsec);
1076 
1077 }
1078 #endif  /* CONFIG_NTP_PPS */
1079 
1080 static int __init ntp_tick_adj_setup(char *str)
1081 {
1082     int rc = kstrtos64(str, 0, &ntp_tick_adj);
1083     if (rc)
1084         return rc;
1085 
1086     ntp_tick_adj <<= NTP_SCALE_SHIFT;
1087     return 1;
1088 }
1089 
1090 __setup("ntp_tick_adj=", ntp_tick_adj_setup);
1091 
/* Boot-time initialization of the NTP subsystem. */
void __init ntp_init(void)
{
    /* reset NTP state to its defaults (see ntp_clear()) */
    ntp_clear();
    /* set up periodic synchronization of the CMOS/RTC clock */
    ntp_init_cmos_sync();
}