cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

time.c (23400B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various time related
 *  system calls: time, stime, gettimeofday, settimeofday, adjtime
 *
 * Modification history:
 *
 * 1993-09-02    Philip Gladstone
 *      Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08    Torsten Duwe
 *      adjtime interface update and CMOS clock write code
 * 1995-08-13    Torsten Duwe
 *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16    Ulrich Windl
 *	Introduced error checking for many cases in adjtimex().
 *	Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 *	Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *	(Even though the technical memorandum forbids it)
 * 2004-07-14	 Christoph Lameter
 *	Added getnstimeofday to allow the posix timer functions to return
 *	with nanosecond accuracy
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>

#include <generated/timeconst.h>
#include "timekeeping.h"

/*
 * The timezone where the local system is located.  Used as a default by some
 * programs that obtain this value via gettimeofday().
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, __kernel_old_time_t __user *, tloc)
{
	__kernel_old_time_t i = (__kernel_old_time_t)ktime_get_real_seconds();

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}
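
/*
 * Illustrative sketch (hypothetical userspace code; my_time() is a
 * made-up name) of the equivalence the comment above refers to: a
 * libc-level wrapper could derive time() from gettimeofday():
 *
 *	time_t my_time(time_t *tloc)
 *	{
 *		struct timeval tv;
 *
 *		gettimeofday(&tv, NULL);
 *		if (tloc)
 *			*tloc = tv.tv_sec;
 *		return tv.tv_sec;
 *	}
 */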

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */

SYSCALL_DEFINE1(stime, __kernel_old_time_t __user *, tptr)
{
	struct timespec64 tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime64(&tv, NULL);
	if (err)
		return err;

	do_settimeofday64(&tv);
	return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

#ifdef CONFIG_COMPAT_32BIT_TIME
#ifdef __ARCH_WANT_SYS_TIME32

/* old_time32_t is a 32 bit "long" and needs to get converted. */
SYSCALL_DEFINE1(time32, old_time32_t __user *, tloc)
{
	old_time32_t i;

	i = (old_time32_t)ktime_get_real_seconds();

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}

SYSCALL_DEFINE1(stime32, old_time32_t __user *, tptr)
{
	struct timespec64 tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime64(&tv, NULL);
	if (err)
		return err;

	do_settimeofday64(&tv);
	return 0;
}

#endif /* __ARCH_WANT_SYS_TIME32 */
#endif

SYSCALL_DEFINE2(gettimeofday, struct __kernel_old_timeval __user *, tv,
		struct timezone __user *, tz)
{
	if (likely(tv != NULL)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		if (put_user(ts.tv_sec, &tv->tv_sec) ||
		    put_user(ts.tv_nsec / 1000, &tv->tv_usec))
			return -EFAULT;
	}
	if (unlikely(tz != NULL)) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}

/*
 * If for some reason the CMOS clock has not been running in UTC but
 * in some local time, then the first time we set the timezone we
 * warp the clock so that it ticks UTC time instead of local time.
 * Presumably, if someone is setting the timezone then we are running
 * in an environment where the programs understand about timezones.
 * This should be done at boot time in the /etc/rc script, as soon as
 * possible, so that the clock can be set right. Otherwise, various
 * programs will get confused when the clock gets warped.
 */

int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
	static int firsttime = 1;
	int error = 0;

	if (tv && !timespec64_valid_settod(tv))
		return -EINVAL;

	error = security_settime64(tv, tz);
	if (error)
		return error;

	if (tz) {
		/* Verify we're within the +-15 hrs range */
		if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
			return -EINVAL;

		sys_tz = *tz;
		update_vsyscall_tz();
		if (firsttime) {
			firsttime = 0;
			if (!tv)
				timekeeping_warp_clock();
		}
	}
	if (tv)
		return do_settimeofday64(tv);
	return 0;
}
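
/*
 * Illustrative example of the timezone-only path above: a userspace
 * call such as
 *
 *	struct timezone tz = { .tz_minuteswest = -60, .tz_dsttime = 0 };
 *	settimeofday(NULL, &tz);
 *
 * updates sys_tz and, on the very first tz-only call after boot,
 * invokes timekeeping_warp_clock() to shift the system clock by
 * tz_minuteswest, on the assumption that the RTC was running in
 * local time.
 */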

SYSCALL_DEFINE2(settimeofday, struct __kernel_old_timeval __user *, tv,
		struct timezone __user *, tz)
{
	struct timespec64 new_ts;
	struct timezone new_tz;

	if (tv) {
		if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
		    get_user(new_ts.tv_nsec, &tv->tv_usec))
			return -EFAULT;

		if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
			return -EINVAL;

		new_ts.tv_nsec *= NSEC_PER_USEC;
	}
	if (tz) {
		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
			return -EFAULT;
	}

	return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv,
		       struct timezone __user *, tz)
{
	if (tv) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		if (put_user(ts.tv_sec, &tv->tv_sec) ||
		    put_user(ts.tv_nsec / 1000, &tv->tv_usec))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}

	return 0;
}

COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
		       struct timezone __user *, tz)
{
	struct timespec64 new_ts;
	struct timezone new_tz;

	if (tv) {
		if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
		    get_user(new_ts.tv_nsec, &tv->tv_usec))
			return -EFAULT;

		if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
			return -EINVAL;

		new_ts.tv_nsec *= NSEC_PER_USEC;
	}
	if (tz) {
		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
			return -EFAULT;
	}

	return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#endif

#ifdef CONFIG_64BIT
SYSCALL_DEFINE1(adjtimex, struct __kernel_timex __user *, txc_p)
{
	struct __kernel_timex txc;		/* Local copy of parameter */
	int ret;

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(struct __kernel_timex)))
		return -EFAULT;
	ret = do_adjtimex(&txc);
	return copy_to_user(txc_p, &txc, sizeof(struct __kernel_timex)) ? -EFAULT : ret;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
int get_old_timex32(struct __kernel_timex *txc, const struct old_timex32 __user *utp)
{
	struct old_timex32 tx32;

	memset(txc, 0, sizeof(struct __kernel_timex));
	if (copy_from_user(&tx32, utp, sizeof(struct old_timex32)))
		return -EFAULT;

	txc->modes = tx32.modes;
	txc->offset = tx32.offset;
	txc->freq = tx32.freq;
	txc->maxerror = tx32.maxerror;
	txc->esterror = tx32.esterror;
	txc->status = tx32.status;
	txc->constant = tx32.constant;
	txc->precision = tx32.precision;
	txc->tolerance = tx32.tolerance;
	txc->time.tv_sec = tx32.time.tv_sec;
	txc->time.tv_usec = tx32.time.tv_usec;
	txc->tick = tx32.tick;
	txc->ppsfreq = tx32.ppsfreq;
	txc->jitter = tx32.jitter;
	txc->shift = tx32.shift;
	txc->stabil = tx32.stabil;
	txc->jitcnt = tx32.jitcnt;
	txc->calcnt = tx32.calcnt;
	txc->errcnt = tx32.errcnt;
	txc->stbcnt = tx32.stbcnt;

	return 0;
}

int put_old_timex32(struct old_timex32 __user *utp, const struct __kernel_timex *txc)
{
	struct old_timex32 tx32;

	memset(&tx32, 0, sizeof(struct old_timex32));
	tx32.modes = txc->modes;
	tx32.offset = txc->offset;
	tx32.freq = txc->freq;
	tx32.maxerror = txc->maxerror;
	tx32.esterror = txc->esterror;
	tx32.status = txc->status;
	tx32.constant = txc->constant;
	tx32.precision = txc->precision;
	tx32.tolerance = txc->tolerance;
	tx32.time.tv_sec = txc->time.tv_sec;
	tx32.time.tv_usec = txc->time.tv_usec;
	tx32.tick = txc->tick;
	tx32.ppsfreq = txc->ppsfreq;
	tx32.jitter = txc->jitter;
	tx32.shift = txc->shift;
	tx32.stabil = txc->stabil;
	tx32.jitcnt = txc->jitcnt;
	tx32.calcnt = txc->calcnt;
	tx32.errcnt = txc->errcnt;
	tx32.stbcnt = txc->stbcnt;
	tx32.tai = txc->tai;
	if (copy_to_user(utp, &tx32, sizeof(struct old_timex32)))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(adjtimex_time32, struct old_timex32 __user *, utp)
{
	struct __kernel_timex txc;
	int err, ret;

	err = get_old_timex32(&txc, utp);
	if (err)
		return err;

	ret = do_adjtimex(&txc);

	err = put_old_timex32(utp, &txc);
	if (err)
		return err;

	return ret;
}
#endif

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
	return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
	       HZ_TO_MSEC_SHR32;
# else
	return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
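
/*
 * Worked example, assuming HZ == 250: MSEC_PER_SEC % HZ == 0, so the
 * first branch applies and
 *
 *	jiffies_to_msecs(j) == (1000 / 250) * j == 4 * j
 *
 * e.g. jiffies_to_msecs(25) == 100, with no division at runtime.
 */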

unsigned int jiffies_to_usecs(const unsigned long j)
{
	/*
	 * HZ usually doesn't go much beyond MSEC_PER_SEC.
	 * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
	 */
	BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
	return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
	return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
	return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

/*
 * mktime64 - Converts date to seconds.
 * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * A leap second can be indicated by calling this function with sec as
 * 60 (allowable under ISO 8601).  The leap second is treated the same
 * as the following second since they don't exist in UNIX time.
 *
 * An encoding of midnight at the end of the day as 24:00:00 - i.e.
 * midnight tomorrow - (allowable under ISO 8601) is supported.
 */
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
		const unsigned int day, const unsigned int hour,
		const unsigned int min, const unsigned int sec)
{
	unsigned int mon = mon0, year = year0;

	/* 1..12 -> 11,12,1..10 */
	if (0 >= (int) (mon -= 2)) {
		mon += 12;	/* Puts Feb last since it has leap day */
		year -= 1;
	}

	return ((((time64_t)
		  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
		  year*365 - 719499
	    )*24 + hour /* now have hours - midnight tomorrow handled here */
	  )*60 + min /* now have minutes */
	)*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);
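
/*
 * Worked example for mktime64(): for 2000-01-01 00:00:00 UTC,
 * mon0 == 1, so mon becomes 11 and year becomes 1999. The day count
 * is then
 *
 *	1999/4 - 1999/100 + 1999/400 + 367*11/12 + 1 + 1999*365 - 719499
 *	  == 499 - 19 + 4 + 336 + 1 + 729635 - 719499 == 10957 days
 *
 * and ((10957*24 + 0)*60 + 0)*60 + 0 == 946684800, the familiar epoch
 * offset of 2000-01-01, so mktime64(2000, 1, 1, 0, 0, 0) == 946684800.
 */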

struct __kernel_old_timeval ns_to_kernel_old_timeval(const s64 nsec)
{
	struct timespec64 ts = ns_to_timespec64(nsec);
	struct __kernel_old_timeval tv;

	tv.tv_sec = ts.tv_sec;
	tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000;

	return tv;
}
EXPORT_SYMBOL(ns_to_kernel_old_timeval);

/**
 * set_normalized_timespec64 - set timespec sec and nsec parts and normalize
 *
 * @ts:		pointer to timespec variable to be set
 * @sec:	seconds to set
 * @nsec:	nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *	0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation. See
		 * also __iter_div_u64_rem() in include/linux/time.h
		 */
		asm("" : "+rm"(nsec));
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		asm("" : "+rm"(nsec));
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);
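
/*
 * Illustrative example: a raw value with a negative nanosecond part is
 * pulled back into canonical form, adjusting only tv_sec:
 *
 *	struct timespec64 ts;
 *
 *	set_normalized_timespec64(&ts, 1, -1);
 *	ts is now { .tv_sec = 0, .tv_nsec = 999999999 }
 */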

/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
struct timespec64 ns_to_timespec64(const s64 nsec)
{
	struct timespec64 ts = { 0, 0 };
	s32 rem;

	if (likely(nsec > 0)) {
		ts.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
		ts.tv_nsec = rem;
	} else if (nsec < 0) {
		/*
		 * With negative times, tv_sec points to the earlier
		 * second, and tv_nsec counts the nanoseconds since
		 * then, so tv_nsec is always a positive number.
		 */
		ts.tv_sec = -div_u64_rem(-nsec - 1, NSEC_PER_SEC, &rem) - 1;
		ts.tv_nsec = NSEC_PER_SEC - rem - 1;
	}

	return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);
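
/*
 * Illustrative example: per the comment above, negative inputs keep
 * tv_nsec positive and move tv_sec to the earlier second:
 *
 *	ns_to_timespec64(1500) == { .tv_sec = 0,  .tv_nsec = 1500 }
 *	ns_to_timespec64(-1)   == { .tv_sec = -1, .tv_nsec = 999999999 }
 */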

/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:	time in milliseconds
 *
 * Conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor and
 *   handling any 32-bit overflows.
 *   For the details see __msecs_to_jiffies().
 *
 * msecs_to_jiffies() checks for the passed in value being a constant
 * via __builtin_constant_p(), allowing gcc to eliminate most of the
 * code; __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * The _msecs_to_jiffies helpers are the HZ dependent conversion
 * routines found in include/linux/jiffies.h.
 */
unsigned long __msecs_to_jiffies(const unsigned int m)
{
	/*
	 * A negative value means an infinite timeout:
	 */
	if ((int)m < 0)
		return MAX_JIFFY_OFFSET;
	return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);
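
/*
 * Illustrative example: since the argument is reinterpreted as signed,
 * a "wait forever" value from userspace maps to the cap, while small
 * values convert normally (the second line assumes a typical HZ with
 * MSEC_PER_SEC % HZ == 0):
 *
 *	__msecs_to_jiffies((unsigned int)-1) == MAX_JIFFY_OFFSET
 *	msecs_to_jiffies(1000) == HZ
 */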

unsigned long __usecs_to_jiffies(const unsigned int u)
{
	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
	return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(__usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
 * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 * OK.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */

unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
	u64 sec = value->tv_sec;
	long nsec = value->tv_nsec + TICK_NSEC - 1;

	if (sec >= MAX_SEC_IN_JIFFIES) {
		sec = MAX_SEC_IN_JIFFIES;
		nsec = 0;
	}
	return ((sec * SEC_CONVERSION) +
		(((u64)nsec * NSEC_CONVERSION) >>
		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timespec64_to_jiffies);
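
/*
 * Illustrative example of the TICK_NSEC - 1 round-up described above:
 * even a 1 ns timespec costs a full jiffy,
 *
 *	struct timespec64 ts = { .tv_sec = 0, .tv_nsec = 1 };
 *	timespec64_to_jiffies(&ts) == 1
 *
 * which errs in the safe direction for timeouts: never expire early.
 */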

void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u32 rem;

	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
				    NSEC_PER_SEC, &rem);
	value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
	return x * (USER_HZ / HZ);
# else
	return x / (HZ / USER_HZ);
# endif
#else
	return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
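
/*
 * Worked example, assuming HZ == 1000 and USER_HZ == 100: TICK_NSEC
 * (1000000) is not a multiple of NSEC_PER_SEC / USER_HZ (10000000),
 * so the div_u64() fallback runs:
 *
 *	jiffies_to_clock_t(x) == x * 1000000 / 10000000 == x / 10
 *
 * e.g. 2500 jiffies are reported as 250 USER_HZ ticks.
 */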

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
	if (x >= ~0UL / (HZ / USER_HZ))
		return ~0UL;
	return x * (HZ / USER_HZ);
#else
	/* Don't worry about loss of precision here .. */
	if (x >= ~0UL / HZ * USER_HZ)
		return ~0UL;

	/* .. but do try to contain it here */
	return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
	x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
	x = div_u64(x, HZ / USER_HZ);
# else
	/* Nothing to do */
# endif
#else
	/*
	 * There are better ways that don't overflow early,
	 * but even this doesn't overflow in hundreds of years
	 * in 64 bits, so..
	 */
	x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
	return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
	return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
	 */
	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}

u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
	return (NSEC_PER_SEC / HZ) * j;
#else
	return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);

u64 jiffies64_to_msecs(const u64 j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#else
	return div_u64(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_msecs);

/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
	return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
	/* overflow after 292 years if HZ = 1024 */
	return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * Generic case - optimized for cases where HZ is a multiple of 3.
	 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
	 */
	return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);
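
/*
 * Worked example, assuming HZ == 100: NSEC_PER_SEC % HZ == 0, so the
 * common-case branch divides by NSEC_PER_SEC / HZ == 10000000 and a
 * 10 ms delay converts exactly:
 *
 *	nsecs_to_jiffies64(10 * NSEC_PER_MSEC) == 1
 */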

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
	return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);

/*
 * Add two timespec64 values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0).
 * And, each timespec64 is in normalized form.
 */
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
				const struct timespec64 rhs)
{
	struct timespec64 res;

	set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
			lhs.tv_nsec + rhs.tv_nsec);

	if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
		res.tv_sec = TIME64_MAX;
		res.tv_nsec = 0;
	}

	return res;
}
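
/*
 * Illustrative example: adding two valid but huge timespecs wraps
 * tv_sec; the check above then clamps the result to TIME64_MAX
 * instead of returning a bogus small value:
 *
 *	lhs = rhs = { .tv_sec = TIME64_MAX / 2 + 1, .tv_nsec = 0 };
 *	timespec64_add_safe(lhs, rhs) == { TIME64_MAX, 0 }
 */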

int get_timespec64(struct timespec64 *ts,
		   const struct __kernel_timespec __user *uts)
{
	struct __kernel_timespec kts;
	int ret;

	ret = copy_from_user(&kts, uts, sizeof(kts));
	if (ret)
		return -EFAULT;

	ts->tv_sec = kts.tv_sec;

	/* Zero out the padding in compat mode */
	if (in_compat_syscall())
		kts.tv_nsec &= 0xFFFFFFFFUL;

	/* In 32-bit mode, this drops the padding */
	ts->tv_nsec = kts.tv_nsec;

	return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);
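
/*
 * Illustrative example (assuming a little-endian machine): a 32-bit
 * compat task stores only a 32-bit long in the 64-bit tv_nsec slot
 * and may leave garbage in the upper half. The masking above recovers
 * the value the task actually wrote:
 *
 *	kts.tv_nsec == 0xdeadbeef00000001 as copied from userspace
 *	kts.tv_nsec &= 0xFFFFFFFFUL  ->  ts->tv_nsec == 1
 */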

int put_timespec64(const struct timespec64 *ts,
		   struct __kernel_timespec __user *uts)
{
	struct __kernel_timespec kts = {
		.tv_sec = ts->tv_sec,
		.tv_nsec = ts->tv_nsec
	};

	return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);

static int __get_old_timespec32(struct timespec64 *ts64,
				   const struct old_timespec32 __user *cts)
{
	struct old_timespec32 ts;
	int ret;

	ret = copy_from_user(&ts, cts, sizeof(ts));
	if (ret)
		return -EFAULT;

	ts64->tv_sec = ts.tv_sec;
	ts64->tv_nsec = ts.tv_nsec;

	return 0;
}

static int __put_old_timespec32(const struct timespec64 *ts64,
				   struct old_timespec32 __user *cts)
{
	struct old_timespec32 ts = {
		.tv_sec = ts64->tv_sec,
		.tv_nsec = ts64->tv_nsec
	};
	return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
}

int get_old_timespec32(struct timespec64 *ts, const void __user *uts)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
	else
		return __get_old_timespec32(ts, uts);
}
EXPORT_SYMBOL_GPL(get_old_timespec32);

int put_old_timespec32(const struct timespec64 *ts, void __user *uts)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
	else
		return __put_old_timespec32(ts, uts);
}
EXPORT_SYMBOL_GPL(put_old_timespec32);

int get_itimerspec64(struct itimerspec64 *it,
			const struct __kernel_itimerspec __user *uit)
{
	int ret;

	ret = get_timespec64(&it->it_interval, &uit->it_interval);
	if (ret)
		return ret;

	ret = get_timespec64(&it->it_value, &uit->it_value);

	return ret;
}
EXPORT_SYMBOL_GPL(get_itimerspec64);

int put_itimerspec64(const struct itimerspec64 *it,
			struct __kernel_itimerspec __user *uit)
{
	int ret;

	ret = put_timespec64(&it->it_interval, &uit->it_interval);
	if (ret)
		return ret;

	ret = put_timespec64(&it->it_value, &uit->it_value);

	return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);

int get_old_itimerspec32(struct itimerspec64 *its,
			const struct old_itimerspec32 __user *uits)
{
	if (__get_old_timespec32(&its->it_interval, &uits->it_interval) ||
	    __get_old_timespec32(&its->it_value, &uits->it_value))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(get_old_itimerspec32);

int put_old_itimerspec32(const struct itimerspec64 *its,
			struct old_itimerspec32 __user *uits)
{
	if (__put_old_timespec32(&its->it_interval, &uits->it_interval) ||
	    __put_old_timespec32(&its->it_value, &uits->it_value))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(put_old_itimerspec32);