0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *  Copyright (C) 1991, 1992  Linus Torvalds
0004  *
0005  *  This file contains the interface functions for the various time related
0006  *  system calls: time, stime, gettimeofday, settimeofday, adjtime
0007  *
0008  * Modification history:
0009  *
0010  * 1993-09-02    Philip Gladstone
0011  *      Created file with time related functions from sched/core.c and adjtimex()
0012  * 1993-10-08    Torsten Duwe
0013  *      adjtime interface update and CMOS clock write code
0014  * 1995-08-13    Torsten Duwe
0015  *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
0016  * 1999-01-16    Ulrich Windl
0017  *  Introduced error checking for many cases in adjtimex().
0018  *  Updated NTP code according to technical memorandum Jan '96
0019  *  "A Kernel Model for Precision Timekeeping" by Dave Mills
0020  *  Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
0021  *  (Even though the technical memorandum forbids it)
0022  * 2004-07-14    Christoph Lameter
0023  *  Added getnstimeofday to allow the posix timer functions to return
0024  *  with nanosecond accuracy
0025  */
0026 
0027 #include <linux/export.h>
0028 #include <linux/kernel.h>
0029 #include <linux/timex.h>
0030 #include <linux/capability.h>
0031 #include <linux/timekeeper_internal.h>
0032 #include <linux/errno.h>
0033 #include <linux/syscalls.h>
0034 #include <linux/security.h>
0035 #include <linux/fs.h>
0036 #include <linux/math64.h>
0037 #include <linux/ptrace.h>
0038 
0039 #include <linux/uaccess.h>
0040 #include <linux/compat.h>
0041 #include <asm/unistd.h>
0042 
0043 #include <generated/timeconst.h>
0044 #include "timekeeping.h"
0045 
/*
 * The timezone where the local system is located.  Used as a default by some
 * programs who obtain this value by using gettimeofday.
 *
 * Written by do_sys_settimeofday64() and copied out to userspace by the
 * gettimeofday() syscalls below; exported for module use.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);
0053 
0054 #ifdef __ARCH_WANT_SYS_TIME
0055 
0056 /*
0057  * sys_time() can be implemented in user-level using
0058  * sys_gettimeofday().  Is this for backwards compatibility?  If so,
0059  * why not move it into the appropriate arch directory (for those
0060  * architectures that need it).
0061  */
/*
 * time(2): return the current time in seconds since the epoch and,
 * if @tloc is non-NULL, also store it there.
 */
SYSCALL_DEFINE1(time, __kernel_old_time_t __user *, tloc)
{
    __kernel_old_time_t i = (__kernel_old_time_t)ktime_get_real_seconds();

    if (tloc) {
        if (put_user(i,tloc))
            return -EFAULT;
    }
    /*
     * Large time values can look like negative errnos; tell the
     * ptrace machinery this return value is a success, not an error.
     */
    force_successful_syscall_return();
    return i;
}
0073 
0074 /*
0075  * sys_stime() can be implemented in user-level using
0076  * sys_settimeofday().  Is this for backwards compatibility?  If so,
0077  * why not move it into the appropriate arch directory (for those
0078  * architectures that need it).
0079  */
0080 
/*
 * stime(2): set the system wall clock to *@tptr seconds (nanoseconds
 * are zeroed).  Permission is checked via the security hook; returns
 * 0 on success, -EFAULT on bad user pointer, or a security/settime error.
 */
SYSCALL_DEFINE1(stime, __kernel_old_time_t __user *, tptr)
{
    struct timespec64 tv;
    int err;

    if (get_user(tv.tv_sec, tptr))
        return -EFAULT;

    tv.tv_nsec = 0;

    /* LSM hook may refuse the time change (e.g. insufficient privilege). */
    err = security_settime64(&tv, NULL);
    if (err)
        return err;

    do_settimeofday64(&tv);
    return 0;
}
0098 
0099 #endif /* __ARCH_WANT_SYS_TIME */
0100 
0101 #ifdef CONFIG_COMPAT_32BIT_TIME
0102 #ifdef __ARCH_WANT_SYS_TIME32
0103 
0104 /* old_time32_t is a 32 bit "long" and needs to get converted. */
/*
 * 32-bit variant of time(2).  old_time32_t is a 32 bit "long" and
 * needs to get converted; truncation past 2038 is inherent to the ABI.
 */
SYSCALL_DEFINE1(time32, old_time32_t __user *, tloc)
{
    old_time32_t i;

    i = (old_time32_t)ktime_get_real_seconds();

    if (tloc) {
        if (put_user(i,tloc))
            return -EFAULT;
    }
    /* Prevent ptrace from misreporting a large value as an errno. */
    force_successful_syscall_return();
    return i;
}
0118 
/*
 * 32-bit variant of stime(2): set the wall clock from a 32-bit
 * seconds value read from userspace; nanoseconds are zeroed.
 */
SYSCALL_DEFINE1(stime32, old_time32_t __user *, tptr)
{
    struct timespec64 tv;
    int err;

    if (get_user(tv.tv_sec, tptr))
        return -EFAULT;

    tv.tv_nsec = 0;

    /* Security hook may deny the change before the clock is touched. */
    err = security_settime64(&tv, NULL);
    if (err)
        return err;

    do_settimeofday64(&tv);
    return 0;
}
0136 
0137 #endif /* __ARCH_WANT_SYS_TIME32 */
0138 #endif
0139 
/*
 * gettimeofday(2): copy the current wall-clock time (as an old-style
 * timeval) and/or the system timezone out to userspace.  Either
 * pointer may be NULL to skip that part.
 */
SYSCALL_DEFINE2(gettimeofday, struct __kernel_old_timeval __user *, tv,
        struct timezone __user *, tz)
{
    if (likely(tv != NULL)) {
        struct timespec64 ts;

        ktime_get_real_ts64(&ts);
        /* timeval wants microseconds, so scale the nanoseconds down. */
        if (put_user(ts.tv_sec, &tv->tv_sec) ||
            put_user(ts.tv_nsec / 1000, &tv->tv_usec))
            return -EFAULT;
    }
    if (unlikely(tz != NULL)) {
        if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
            return -EFAULT;
    }
    return 0;
}
0157 
0158 /*
0159  * In case for some reason the CMOS clock has not already been running
0160  * in UTC, but in some local time: The first time we set the timezone,
0161  * we will warp the clock so that it is ticking UTC time instead of
0162  * local time. Presumably, if someone is setting the timezone then we
0163  * are running in an environment where the programs understand about
0164  * timezones. This should be done at boot time in the /etc/rc script,
0165  * as soon as possible, so that the clock can be set right. Otherwise,
0166  * various programs will get confused when the clock gets warped.
0167  */
0168 
/*
 * Common backend for the settimeofday() family: validate and apply a
 * new wall-clock time (@tv) and/or timezone (@tz).  Either argument
 * may be NULL.  Returns 0 on success or a negative errno.
 */
int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
    static int firsttime = 1;
    int error = 0;

    /* Reject out-of-range wall-clock values before doing anything. */
    if (tv && !timespec64_valid_settod(tv))
        return -EINVAL;

    /* LSM hook may refuse the time/timezone change. */
    error = security_settime64(tv, tz);
    if (error)
        return error;

    if (tz) {
        /* Verify we're within the +-15 hrs range */
        if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
            return -EINVAL;

        sys_tz = *tz;
        update_vsyscall_tz();
        /*
         * First timezone-only call: warp the clock from local time to
         * UTC (see the comment above this function).  If a new time is
         * being set at the same moment, the explicit tv wins and no
         * warp is applied.
         */
        if (firsttime) {
            firsttime = 0;
            if (!tv)
                timekeeping_warp_clock();
        }
    }
    if (tv)
        return do_settimeofday64(tv);
    return 0;
}
0198 
/*
 * settimeofday(2): set the wall clock and/or timezone from the
 * old-style timeval layout.  Input is converted to a timespec64 and
 * handed to the common do_sys_settimeofday64() helper.
 */
SYSCALL_DEFINE2(settimeofday, struct __kernel_old_timeval __user *, tv,
        struct timezone __user *, tz)
{
    struct timespec64 new_ts;
    struct timezone new_tz;

    if (tv) {
        if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
            get_user(new_ts.tv_nsec, &tv->tv_usec))
            return -EFAULT;

        /* tv_nsec temporarily holds microseconds; range-check as such. */
        if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
            return -EINVAL;

        new_ts.tv_nsec *= NSEC_PER_USEC;
    }
    if (tz) {
        if (copy_from_user(&new_tz, tz, sizeof(*tz)))
            return -EFAULT;
    }

    return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
0222 
0223 #ifdef CONFIG_COMPAT
/*
 * Compat gettimeofday(2): same as the native version but writes the
 * 32-bit old_timeval32 layout for 32-bit tasks on a 64-bit kernel.
 */
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv,
               struct timezone __user *, tz)
{
    if (tv) {
        struct timespec64 ts;

        ktime_get_real_ts64(&ts);
        /* Convert nanoseconds to the microseconds the ABI expects. */
        if (put_user(ts.tv_sec, &tv->tv_sec) ||
            put_user(ts.tv_nsec / 1000, &tv->tv_usec))
            return -EFAULT;
    }
    if (tz) {
        if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
            return -EFAULT;
    }

    return 0;
}
0242 
/*
 * Compat settimeofday(2): read the 32-bit old_timeval32 layout,
 * convert to timespec64 and apply via do_sys_settimeofday64().
 */
COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
               struct timezone __user *, tz)
{
    struct timespec64 new_ts;
    struct timezone new_tz;

    if (tv) {
        if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
            get_user(new_ts.tv_nsec, &tv->tv_usec))
            return -EFAULT;

        /* tv_nsec still holds microseconds at this point. */
        if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
            return -EINVAL;

        new_ts.tv_nsec *= NSEC_PER_USEC;
    }
    if (tz) {
        if (copy_from_user(&new_tz, tz, sizeof(*tz)))
            return -EFAULT;
    }

    return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
0266 #endif
0267 
0268 #ifdef CONFIG_64BIT
/*
 * adjtimex(2): pass a __kernel_timex back and forth between userspace
 * and do_adjtimex().  Returns the clock state from do_adjtimex() on
 * success, or -EFAULT if either user copy fails.
 */
SYSCALL_DEFINE1(adjtimex, struct __kernel_timex __user *, txc_p)
{
    struct __kernel_timex txc;      /* Local copy of parameter */
    int ret;

    /* Copy the user data space into the kernel copy
     * structure. But bear in mind that the structures
     * may change
     */
    if (copy_from_user(&txc, txc_p, sizeof(struct __kernel_timex)))
        return -EFAULT;
    ret = do_adjtimex(&txc);
    /* The updated structure is always copied back, even on error. */
    return copy_to_user(txc_p, &txc, sizeof(struct __kernel_timex)) ? -EFAULT : ret;
}
0283 #endif
0284 
0285 #ifdef CONFIG_COMPAT_32BIT_TIME
/*
 * get_old_timex32 - read a 32-bit old_timex32 from userspace and widen
 * it into a zero-initialized __kernel_timex.
 *
 * Note: tx32.tai is intentionally not read here; txc->tai stays zero
 * from the memset (it is written back by put_old_timex32() instead).
 * Returns 0 on success, -EFAULT on a faulting user copy.
 */
int get_old_timex32(struct __kernel_timex *txc, const struct old_timex32 __user *utp)
{
    struct old_timex32 tx32;

    memset(txc, 0, sizeof(struct __kernel_timex));
    if (copy_from_user(&tx32, utp, sizeof(struct old_timex32)))
        return -EFAULT;

    txc->modes = tx32.modes;
    txc->offset = tx32.offset;
    txc->freq = tx32.freq;
    txc->maxerror = tx32.maxerror;
    txc->esterror = tx32.esterror;
    txc->status = tx32.status;
    txc->constant = tx32.constant;
    txc->precision = tx32.precision;
    txc->tolerance = tx32.tolerance;
    txc->time.tv_sec = tx32.time.tv_sec;
    txc->time.tv_usec = tx32.time.tv_usec;
    txc->tick = tx32.tick;
    txc->ppsfreq = tx32.ppsfreq;
    txc->jitter = tx32.jitter;
    txc->shift = tx32.shift;
    txc->stabil = tx32.stabil;
    txc->jitcnt = tx32.jitcnt;
    txc->calcnt = tx32.calcnt;
    txc->errcnt = tx32.errcnt;
    txc->stbcnt = tx32.stbcnt;

    return 0;
}
0317 
/*
 * put_old_timex32 - narrow a __kernel_timex into the 32-bit
 * old_timex32 layout and copy it out to userspace.
 *
 * The local tx32 is zeroed first so any padding/unset fields do not
 * leak kernel stack contents.  Returns 0 or -EFAULT.
 */
int put_old_timex32(struct old_timex32 __user *utp, const struct __kernel_timex *txc)
{
    struct old_timex32 tx32;

    memset(&tx32, 0, sizeof(struct old_timex32));
    tx32.modes = txc->modes;
    tx32.offset = txc->offset;
    tx32.freq = txc->freq;
    tx32.maxerror = txc->maxerror;
    tx32.esterror = txc->esterror;
    tx32.status = txc->status;
    tx32.constant = txc->constant;
    tx32.precision = txc->precision;
    tx32.tolerance = txc->tolerance;
    tx32.time.tv_sec = txc->time.tv_sec;
    tx32.time.tv_usec = txc->time.tv_usec;
    tx32.tick = txc->tick;
    tx32.ppsfreq = txc->ppsfreq;
    tx32.jitter = txc->jitter;
    tx32.shift = txc->shift;
    tx32.stabil = txc->stabil;
    tx32.jitcnt = txc->jitcnt;
    tx32.calcnt = txc->calcnt;
    tx32.errcnt = txc->errcnt;
    tx32.stbcnt = txc->stbcnt;
    tx32.tai = txc->tai;
    if (copy_to_user(utp, &tx32, sizeof(struct old_timex32)))
        return -EFAULT;
    return 0;
}
0348 
/*
 * 32-bit adjtimex(2): convert the old 32-bit timex layout in and out
 * around a call to do_adjtimex().  Copy errors (-EFAULT) take
 * precedence over the clock-state return of do_adjtimex().
 */
SYSCALL_DEFINE1(adjtimex_time32, struct old_timex32 __user *, utp)
{
    struct __kernel_timex txc;
    int err, ret;

    err = get_old_timex32(&txc, utp);
    if (err)
        return err;

    ret = do_adjtimex(&txc);

    err = put_old_timex32(utp, &txc);
    if (err)
        return err;

    return ret;
}
0366 #endif
0367 
0368 /*
0369  * Convert jiffies to milliseconds and back.
0370  *
0371  * Avoid unnecessary multiplications/divisions in the
0372  * two most common HZ cases:
0373  */
/*
 * jiffies_to_msecs - convert @j jiffies to milliseconds.
 *
 * The fast paths cover HZ values that divide (or are divided by)
 * MSEC_PER_SEC exactly; otherwise a precomputed multiply/shift
 * (32-bit) or a rounding division (64-bit) is used.  All variants
 * round up so a nonzero jiffy count never reports as 0 ms.
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
    return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
    return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
    /* Scaled multiply + shift from generated/timeconst.h, rounded up. */
    return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
           HZ_TO_MSEC_SHR32;
# else
    return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
0390 
/*
 * jiffies_to_usecs - convert @j jiffies to microseconds.
 */
unsigned int jiffies_to_usecs(const unsigned long j)
{
    /*
     * Hz usually doesn't go much further MSEC_PER_SEC.
     * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
     */
    BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
    /* Fast path: HZ divides a second's worth of microseconds exactly. */
    return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
    /* Precomputed multiply/shift from generated/timeconst.h. */
    return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
    return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);
0410 
0411 /*
0412  * mktime64 - Converts date to seconds.
0413  * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
0414  * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
0415  * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
0416  *
0417  * [For the Julian calendar (which was used in Russia before 1917,
0418  * Britain & colonies before 1752, anywhere else before 1582,
0419  * and is still in use by some communities) leave out the
0420  * -year/100+year/400 terms, and add 10.]
0421  *
0422  * This algorithm was first published by Gauss (I think).
0423  *
0424  * A leap second can be indicated by calling this function with sec as
0425  * 60 (allowable under ISO 8601).  The leap second is treated the same
0426  * as the following second since they don't exist in UNIX time.
0427  *
0428  * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight
0429  * tomorrow - (allowable under ISO 8601) is supported.
0430  */
0431 time64_t mktime64(const unsigned int year0, const unsigned int mon0,
0432         const unsigned int day, const unsigned int hour,
0433         const unsigned int min, const unsigned int sec)
0434 {
0435     unsigned int mon = mon0, year = year0;
0436 
0437     /* 1..12 -> 11,12,1..10 */
0438     if (0 >= (int) (mon -= 2)) {
0439         mon += 12;  /* Puts Feb last since it has leap day */
0440         year -= 1;
0441     }
0442 
0443     return ((((time64_t)
0444           (year/4 - year/100 + year/400 + 367*mon/12 + day) +
0445           year*365 - 719499
0446         )*24 + hour /* now have hours - midnight tomorrow handled here */
0447       )*60 + min /* now have minutes */
0448     )*60 + sec; /* finally seconds */
0449 }
0450 EXPORT_SYMBOL(mktime64);
0451 
0452 struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec)
0453 {
0454     struct timespec64 ts = ns_to_timespec64(nsec);
0455     struct __kernel_old_timeval tv;
0456 
0457     tv.tv_sec = ts.tv_sec;
0458     tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000;
0459 
0460     return tv;
0461 }
0462 EXPORT_SYMBOL(ns_to_kernel_old_timeval);
0463 
0464 /**
0465  * set_normalized_timespec - set timespec sec and nsec parts and normalize
0466  *
0467  * @ts:     pointer to timespec variable to be set
0468  * @sec:    seconds to set
0469  * @nsec:   nanoseconds to set
0470  *
0471  * Set seconds and nanoseconds field of a timespec variable and
0472  * normalize to the timespec storage format
0473  *
0474  * Note: The tv_nsec part is always in the range of
0475  *  0 <= tv_nsec < NSEC_PER_SEC
0476  * For negative values only the tv_sec field is negative !
0477  */
/*
 * set_normalized_timespec64 - store @sec/@nsec into @ts, normalizing
 * @nsec into [0, NSEC_PER_SEC) by moving whole seconds into @sec.
 * For negative times only tv_sec ends up negative.
 */
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
    while (nsec >= NSEC_PER_SEC) {
        /*
         * The following asm() prevents the compiler from
         * optimising this loop into a modulo operation. See
         * also __iter_div_u64_rem() in include/linux/time.h
         */
        asm("" : "+rm"(nsec));
        nsec -= NSEC_PER_SEC;
        ++sec;
    }
    while (nsec < 0) {
        /* Same barrier trick for the negative-overshoot direction. */
        asm("" : "+rm"(nsec));
        nsec += NSEC_PER_SEC;
        --sec;
    }
    ts->tv_sec = sec;
    ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);
0499 
0500 /**
0501  * ns_to_timespec64 - Convert nanoseconds to timespec64
0502  * @nsec:       the nanoseconds value to be converted
0503  *
0504  * Returns the timespec64 representation of the nsec parameter.
0505  */
/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 * The result is normalized: tv_nsec is always in [0, NSEC_PER_SEC),
 * so for negative inputs tv_sec points at the earlier second.
 */
struct timespec64 ns_to_timespec64(s64 nsec)
{
    struct timespec64 ts = { 0, 0 };
    s32 rem;

    if (likely(nsec > 0)) {
        ts.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
        ts.tv_nsec = rem;
    } else if (nsec < 0) {
        /*
         * With negative times, tv_sec points to the earlier
         * second, and tv_nsec counts the nanoseconds since
         * then, so tv_nsec is always a positive number.
         * The -1/+1 dance keeps the unsigned division well
         * defined for every negative input including -1.
         */
        ts.tv_sec = -div_u64_rem(-nsec - 1, NSEC_PER_SEC, &rem) - 1;
        ts.tv_nsec = NSEC_PER_SEC - rem - 1;
    }

    return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);
0527 
0528 /**
0529  * msecs_to_jiffies: - convert milliseconds to jiffies
0530  * @m:  time in milliseconds
0531  *
0532  * conversion is done as follows:
0533  *
0534  * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
0535  *
0536  * - 'too large' values [that would result in larger than
0537  *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
0538  *
0539  * - all other values are converted to jiffies by either multiplying
0540  *   the input value by a factor or dividing it with a factor and
0541  *   handling any 32-bit overflows.
0542  *   for the details see __msecs_to_jiffies()
0543  *
0544  * msecs_to_jiffies() checks for the passed in value being a constant
0545  * via __builtin_constant_p() allowing gcc to eliminate most of the
0546  * code, __msecs_to_jiffies() is called if the value passed does not
0547  * allow constant folding and the actual conversion must be done at
0548  * runtime.
0549  * the _msecs_to_jiffies helpers are the HZ dependent conversion
0550  * routines found in include/linux/jiffies.h
0551  */
/*
 * __msecs_to_jiffies - runtime slow path of msecs_to_jiffies();
 * see the block comment above for the full conversion contract.
 */
unsigned long __msecs_to_jiffies(const unsigned int m)
{
    /*
     * Negative value, means infinite timeout:
     */
    if ((int)m < 0)
        return MAX_JIFFY_OFFSET;
    return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);
0562 
0563 unsigned long __usecs_to_jiffies(const unsigned int u)
0564 {
0565     if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
0566         return MAX_JIFFY_OFFSET;
0567     return _usecs_to_jiffies(u);
0568 }
0569 EXPORT_SYMBOL(__usecs_to_jiffies);
0570 
0571 /*
0572  * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
0573  * that a remainder subtract here would not do the right thing as the
0574  * resolution values don't fall on second boundaries.  I.e. the line:
0575  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
0576  * Note that due to the small error in the multiplier here, this
0577  * rounding is incorrect for sufficiently large values of tv_nsec, but
0578  * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
0579  * OK.
0580  *
0581  * Rather, we just shift the bits off the right.
0582  *
0583  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
0584  * value to a scaled second value.
0585  */
0586 
/*
 * timespec64_to_jiffies - convert *@value to a jiffy count, rounding
 * the nanosecond part up to the next tick (see comment block above for
 * why the scaled fixed-point arithmetic is used).
 */
unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
    u64 sec = value->tv_sec;
    long nsec = value->tv_nsec + TICK_NSEC - 1;

    /* Clamp to the largest representable interval. */
    if (sec >= MAX_SEC_IN_JIFFIES){
        sec = MAX_SEC_IN_JIFFIES;
        nsec = 0;
    }
    /* Fixed-point multiply by HZ, then shift down to whole jiffies. */
    return ((sec * SEC_CONVERSION) +
        (((u64)nsec * NSEC_CONVERSION) >>
         (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;

}
EXPORT_SYMBOL(timespec64_to_jiffies);
0603 
/*
 * jiffies_to_timespec64 - express @jiffies as seconds + nanoseconds
 * in *@value.
 */
void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
    /*
     * Convert jiffies to nanoseconds and separate with
     * one divide.
     */
    u32 rem;
    value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                    NSEC_PER_SEC, &rem);
    value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);
0617 
0618 /*
0619  * Convert jiffies/jiffies_64 to clock_t and back.
0620  */
/*
 * jiffies_to_clock_t - convert @x jiffies to USER_HZ clock ticks.
 * Uses exact integer ratios when the tick length divides a USER_HZ
 * tick evenly; otherwise goes through nanoseconds with one division.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
    return x * (USER_HZ / HZ);
# else
    return x / (HZ / USER_HZ);
# endif
#else
    return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
0634 
/*
 * clock_t_to_jiffies - convert @x USER_HZ clock ticks to jiffies,
 * saturating at ~0UL on overflow.
 */
unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ)==0
    if (x >= ~0UL / (HZ / USER_HZ))
        return ~0UL;
    return x * (HZ / USER_HZ);
#else
    /* Don't worry about loss of precision here .. */
    if (x >= ~0UL / HZ * USER_HZ)
        return ~0UL;

    /* .. but do try to contain it here */
    return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);
0651 
/*
 * jiffies_64_to_clock_t - 64-bit variant of jiffies_to_clock_t();
 * converts @x jiffies to USER_HZ clock ticks.
 */
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
    x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
    x = div_u64(x, HZ / USER_HZ);
# else
    /* Nothing to do */
# endif
#else
    /*
     * There are better ways that don't overflow early,
     * but even this doesn't overflow in hundreds of years
     * in 64 bits, so..
     */
    x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
    return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);
0673 
/*
 * nsec_to_clock_t - convert @x nanoseconds to USER_HZ clock ticks,
 * picking the cheapest exact (or near-exact) formula for this USER_HZ.
 */
u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
    return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
    return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
    /*
     * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
     * overflow after 64.99 years.
     * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
     */
    return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
0689 
/*
 * jiffies64_to_nsecs - convert @j jiffies (64-bit) to nanoseconds.
 */
u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
    /* Exact ratio: one multiply, no division needed. */
    return (NSEC_PER_SEC / HZ) * j;
# else
    return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);
0699 
/*
 * jiffies64_to_msecs - convert @j jiffies (64-bit) to milliseconds.
 */
u64 jiffies64_to_msecs(const u64 j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
    /* Exact ratio: one multiply, no division needed. */
    return (MSEC_PER_SEC / HZ) * j;
#else
    return div_u64(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_msecs);
0709 
0710 /**
0711  * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
0712  *
0713  * @n:  nsecs in u64
0714  *
0715  * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
0716  * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
0717  * for scheduler, not for use in device drivers to calculate timeout value.
0718  *
0719  * note:
0720  *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
0721  *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
0722  */
/*
 * Body of nsecs_to_jiffies64(); see the kernel-doc comment above.
 * Each branch trades overflow headroom for exactness depending on HZ.
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
    /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
    return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
    /* overflow after 292 years if HZ = 1024 */
    return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
    /*
     * Generic case - optimized for cases where HZ is a multiple of 3.
     * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
     */
    return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);
0740 
0741 /**
0742  * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
0743  *
0744  * @n:  nsecs in u64
0745  *
0746  * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
0747  * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
0748  * for scheduler, not for use in device drivers to calculate timeout value.
0749  *
0750  * note:
0751  *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
0752  *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
0753  */
0754 unsigned long nsecs_to_jiffies(u64 n)
0755 {
0756     return (unsigned long)nsecs_to_jiffies64(n);
0757 }
0758 EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
0759 
0760 /*
0761  * Add two timespec64 values and do a safety check for overflow.
0762  * It's assumed that both values are valid (>= 0).
0763  * And, each timespec64 is in normalized form.
0764  */
/*
 * Add two timespec64 values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0).
 * And, each timespec64 is in normalized form.
 */
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
                const struct timespec64 rhs)
{
    struct timespec64 res;

    /* Sum in unsigned arithmetic so a wrapped result is detectable. */
    set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
            lhs.tv_nsec + rhs.tv_nsec);

    /*
     * With both inputs non-negative, a result smaller than either
     * operand can only mean the addition wrapped: saturate.
     */
    if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
        res.tv_sec = TIME64_MAX;
        res.tv_nsec = 0;
    }

    return res;
}
0780 
/*
 * get_timespec64 - copy a __kernel_timespec in from userspace and
 * convert it to a timespec64.
 *
 * For 32-bit (compat) callers the upper half of the 64-bit tv_nsec
 * slot is unused padding that may contain garbage, so it is masked
 * off before use.  Returns 0 or -EFAULT.
 */
int get_timespec64(struct timespec64 *ts,
           const struct __kernel_timespec __user *uts)
{
    struct __kernel_timespec kts;
    int ret;

    ret = copy_from_user(&kts, uts, sizeof(kts));
    if (ret)
        return -EFAULT;

    ts->tv_sec = kts.tv_sec;

    /* Zero out the padding in compat mode */
    if (in_compat_syscall())
        kts.tv_nsec &= 0xFFFFFFFFUL;

    /* In 32-bit mode, this drops the padding */
    ts->tv_nsec = kts.tv_nsec;

    return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);
0803 
/*
 * put_timespec64 - copy a timespec64 out to userspace as a
 * __kernel_timespec.  The designated initializer zero-fills any
 * remaining members so no kernel stack data escapes.
 * Returns 0 or -EFAULT.
 */
int put_timespec64(const struct timespec64 *ts,
           struct __kernel_timespec __user *uts)
{
    struct __kernel_timespec kts = {
        .tv_sec = ts->tv_sec,
        .tv_nsec = ts->tv_nsec
    };

    return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);
0815 
0816 static int __get_old_timespec32(struct timespec64 *ts64,
0817                    const struct old_timespec32 __user *cts)
0818 {
0819     struct old_timespec32 ts;
0820     int ret;
0821 
0822     ret = copy_from_user(&ts, cts, sizeof(ts));
0823     if (ret)
0824         return -EFAULT;
0825 
0826     ts64->tv_sec = ts.tv_sec;
0827     ts64->tv_nsec = ts.tv_nsec;
0828 
0829     return 0;
0830 }
0831 
/*
 * __put_old_timespec32 - narrow *@ts64 into the 32-bit old_timespec32
 * layout and copy it to userspace.  Returns 0 or -EFAULT.
 */
static int __put_old_timespec32(const struct timespec64 *ts64,
                   struct old_timespec32 __user *cts)
{
    struct old_timespec32 ts = {
        .tv_sec = ts64->tv_sec,
        .tv_nsec = ts64->tv_nsec
    };
    return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
}
0841 
0842 int get_old_timespec32(struct timespec64 *ts, const void __user *uts)
0843 {
0844     if (COMPAT_USE_64BIT_TIME)
0845         return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
0846     else
0847         return __get_old_timespec32(ts, uts);
0848 }
0849 EXPORT_SYMBOL_GPL(get_old_timespec32);
0850 
0851 int put_old_timespec32(const struct timespec64 *ts, void __user *uts)
0852 {
0853     if (COMPAT_USE_64BIT_TIME)
0854         return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
0855     else
0856         return __put_old_timespec32(ts, uts);
0857 }
0858 EXPORT_SYMBOL_GPL(put_old_timespec32);
0859 
0860 int get_itimerspec64(struct itimerspec64 *it,
0861             const struct __kernel_itimerspec __user *uit)
0862 {
0863     int ret;
0864 
0865     ret = get_timespec64(&it->it_interval, &uit->it_interval);
0866     if (ret)
0867         return ret;
0868 
0869     ret = get_timespec64(&it->it_value, &uit->it_value);
0870 
0871     return ret;
0872 }
0873 EXPORT_SYMBOL_GPL(get_itimerspec64);
0874 
0875 int put_itimerspec64(const struct itimerspec64 *it,
0876             struct __kernel_itimerspec __user *uit)
0877 {
0878     int ret;
0879 
0880     ret = put_timespec64(&it->it_interval, &uit->it_interval);
0881     if (ret)
0882         return ret;
0883 
0884     ret = put_timespec64(&it->it_value, &uit->it_value);
0885 
0886     return ret;
0887 }
0888 EXPORT_SYMBOL_GPL(put_itimerspec64);
0889 
0890 int get_old_itimerspec32(struct itimerspec64 *its,
0891             const struct old_itimerspec32 __user *uits)
0892 {
0893 
0894     if (__get_old_timespec32(&its->it_interval, &uits->it_interval) ||
0895         __get_old_timespec32(&its->it_value, &uits->it_value))
0896         return -EFAULT;
0897     return 0;
0898 }
0899 EXPORT_SYMBOL_GPL(get_old_itimerspec32);
0900 
0901 int put_old_itimerspec32(const struct itimerspec64 *its,
0902             struct old_itimerspec32 __user *uits)
0903 {
0904     if (__put_old_timespec32(&its->it_interval, &uits->it_interval) ||
0905         __put_old_timespec32(&its->it_value, &uits->it_value))
0906         return -EFAULT;
0907     return 0;
0908 }
0909 EXPORT_SYMBOL_GPL(put_old_itimerspec32);