cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ntp.c (28945B)


// SPDX-License-Identifier: GPL-2.0
/*
 * NTP state machine interfaces and logic.
 *
 * This code was mainly moved from kernel/timer.c and kernel/time.c.
 * Please see those files for relevant copyright info and historical
 * changelogs.
 */
#include <linux/capability.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/audit.h>

#include "ntp_internal.h"
#include "timekeeping_internal.h"


/*
 * NTP timekeeping variables:
 *
 * Note: All of the NTP state is protected by the timekeeping locks.
 */


/* USER_HZ period (usecs): */
unsigned long			tick_usec = USER_TICK_USEC;

/* SHIFTED_HZ period (nsecs): */
unsigned long			tick_nsec;

static u64			tick_length;
static u64			tick_length_base;

#define SECS_PER_DAY		86400
#define MAX_TICKADJ		500LL		/* usecs */
#define MAX_TICKADJ_SCALED \
	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
#define MAX_TAI_OFFSET		100000
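
/*
 * Illustrative note (not from the original file; assumes NTP_SCALE_SHIFT == 32
 * and NTP_INTERVAL_FREQ == HZ from timex.h/ntp_internal.h):
 * MAX_TICKADJ_SCALED is the per-tick share of the maximum adjtime() slew.
 * Summed over the HZ ticks of one second it amounts to 500 usec, i.e.
 * adjtime() slews the clock by at most 500 ppm regardless of HZ.
 */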

/*
 * phase-lock loop variables
 */

/*
 * clock synchronization status
 *
 * (TIME_ERROR prevents overwriting the CMOS clock)
 */
static int			time_state = TIME_OK;

/* clock status bits:							*/
static int			time_status = STA_UNSYNC;

/* time adjustment (nsecs):						*/
static s64			time_offset;

/* pll time constant:							*/
static long			time_constant = 2;

/* maximum error (usecs):						*/
static long			time_maxerror = NTP_PHASE_LIMIT;

/* estimated error (usecs):						*/
static long			time_esterror = NTP_PHASE_LIMIT;

/* frequency offset (scaled nsecs/secs):				*/
static s64			time_freq;

/* time at last adjustment (secs):					*/
static time64_t		time_reftime;

static long			time_adjust;

/* constant (boot-param configurable) NTP tick adjustment (upscaled)	*/
static s64			ntp_tick_adj;

/* second value of the next pending leapsecond, or TIME64_MAX if no leap */
static time64_t			ntp_next_leap_sec = TIME64_MAX;

#ifdef CONFIG_NTP_PPS

/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available. They establish the engineering parameters of the clock
 * discipline loop when controlled by the PPS signal.
 */
#define PPS_VALID	10	/* PPS signal watchdog max (s) */
#define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
#define PPS_INTMIN	2	/* min freq interval (s) (shift) */
#define PPS_INTMAX	8	/* max freq interval (s) (shift) */
#define PPS_INTCOUNT	4	/* number of consecutive good intervals to
				   increase pps_shift or consecutive bad
				   intervals to decrease it */
#define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */

static int pps_valid;		/* signal watchdog counter */
static long pps_tf[3];		/* phase median filter */
static long pps_jitter;		/* current jitter (ns) */
static struct timespec64 pps_fbase; /* beginning of the last freq interval */
static int pps_shift;		/* current interval duration (s) (shift) */
static int pps_intcnt;		/* interval counter */
static s64 pps_freq;		/* frequency offset (scaled ns/s) */
static long pps_stabil;		/* current stability (scaled ns/s) */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;		/* calibration intervals */
static long pps_jitcnt;		/* jitter limit exceeded */
static long pps_stbcnt;		/* stability limit exceeded */
static long pps_errcnt;		/* calibration errors */


/* PPS kernel consumer compensates the whole phase error immediately.
 * Otherwise, reduce the offset by a fixed factor times the time constant.
 */
static inline s64 ntp_offset_chunk(s64 offset)
{
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		return offset;
	else
		return shift_right(offset, SHIFT_PLL + time_constant);
}

static inline void pps_reset_freq_interval(void)
{
	/* the PPS calibration interval may end
	   surprisingly early */
	pps_shift = PPS_INTMIN;
	pps_intcnt = 0;
}

/**
 * pps_clear - Clears the PPS state variables
 */
static inline void pps_clear(void)
{
	pps_reset_freq_interval();
	pps_tf[0] = 0;
	pps_tf[1] = 0;
	pps_tf[2] = 0;
	pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
	pps_freq = 0;
}

/* Decrease pps_valid to indicate that another second has passed since
 * the last PPS signal. When it reaches 0, indicate that PPS signal is
 * missing.
 */
static inline void pps_dec_valid(void)
{
	if (pps_valid > 0)
		pps_valid--;
	else {
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
		pps_clear();
	}
}

static inline void pps_set_freq(s64 freq)
{
	pps_freq = freq;
}

static inline int is_error_status(int status)
{
	return (status & (STA_UNSYNC|STA_CLOCKERR))
		/* PPS signal lost when either PPS time or
		 * PPS frequency synchronization requested
		 */
		|| ((status & (STA_PPSFREQ|STA_PPSTIME))
			&& !(status & STA_PPSSIGNAL))
		/* PPS jitter exceeded when
		 * PPS time synchronization requested */
		|| ((status & (STA_PPSTIME|STA_PPSJITTER))
			== (STA_PPSTIME|STA_PPSJITTER))
		/* PPS wander exceeded or calibration error when
		 * PPS frequency synchronization requested
		 */
		|| ((status & STA_PPSFREQ)
			&& (status & (STA_PPSWANDER|STA_PPSERROR)));
}

static inline void pps_fill_timex(struct __kernel_timex *txc)
{
	txc->ppsfreq	   = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
					 PPM_SCALE_INV, NTP_SCALE_SHIFT);
	txc->jitter	   = pps_jitter;
	if (!(time_status & STA_NANO))
		txc->jitter = pps_jitter / NSEC_PER_USEC;
	txc->shift	   = pps_shift;
	txc->stabil	   = pps_stabil;
	txc->jitcnt	   = pps_jitcnt;
	txc->calcnt	   = pps_calcnt;
	txc->errcnt	   = pps_errcnt;
	txc->stbcnt	   = pps_stbcnt;
}

#else /* !CONFIG_NTP_PPS */

static inline s64 ntp_offset_chunk(s64 offset)
{
	return shift_right(offset, SHIFT_PLL + time_constant);
}

static inline void pps_reset_freq_interval(void) {}
static inline void pps_clear(void) {}
static inline void pps_dec_valid(void) {}
static inline void pps_set_freq(s64 freq) {}

static inline int is_error_status(int status)
{
	return status & (STA_UNSYNC|STA_CLOCKERR);
}

static inline void pps_fill_timex(struct __kernel_timex *txc)
{
	/* PPS is not implemented, so these are zero */
	txc->ppsfreq	   = 0;
	txc->jitter	   = 0;
	txc->shift	   = 0;
	txc->stabil	   = 0;
	txc->jitcnt	   = 0;
	txc->calcnt	   = 0;
	txc->errcnt	   = 0;
	txc->stbcnt	   = 0;
}

#endif /* CONFIG_NTP_PPS */


/**
 * ntp_synced - Returns 1 if the NTP status is not UNSYNC
 *
 */
static inline int ntp_synced(void)
{
	return !(time_status & STA_UNSYNC);
}


/*
 * NTP methods:
 */

/*
 * Update (tick_length, tick_length_base, tick_nsec), based
 * on (tick_usec, ntp_tick_adj, time_freq):
 */
static void ntp_update_frequency(void)
{
	u64 second_length;
	u64 new_base;

	second_length		 = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
						<< NTP_SCALE_SHIFT;

	second_length		+= ntp_tick_adj;
	second_length		+= time_freq;

	tick_nsec		 = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
	new_base		 = div_u64(second_length, NTP_INTERVAL_FREQ);

	/*
	 * Don't wait for the next second_overflow, apply
	 * the change to the tick length immediately:
	 */
	tick_length		+= new_base - tick_length_base;
	tick_length_base	 = new_base;
}
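
/*
 * Worked example (illustrative, assuming the defaults USER_HZ == 100 and
 * tick_usec == 10000, with ntp_tick_adj and time_freq zero): second_length
 * starts as 10000 * 1000 * 100 = 1e9 nanoseconds in NTP_SCALE_SHIFT fixed
 * point, tick_nsec becomes 1e9 / HZ, and tick_length_base becomes one NTP
 * interval of it. A nonzero time_freq then stretches or shrinks every
 * subsequent tick proportionally.
 */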

static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
{
	time_status &= ~STA_MODE;

	if (secs < MINSEC)
		return 0;

	if (!(time_status & STA_FLL) && (secs <= MAXSEC))
		return 0;

	time_status |= STA_MODE;

	return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
}
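
/*
 * Numeric sketch of the FLL gain above (SHIFT_FLL == 2, so the returned
 * frequency correction is offset / (4 * secs) in scaled ns/s): a 1 ms offset
 * seen 512 s after the last update yields 1000000 / 2048, roughly 488 ns/s
 * (about 0.5 ppm). MINSEC and MAXSEC (256 s and 2048 s in
 * include/linux/timex.h) gate when FLL mode is used at all.
 */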

static void ntp_update_offset(long offset)
{
	s64 freq_adj;
	s64 offset64;
	long secs;

	if (!(time_status & STA_PLL))
		return;

	if (!(time_status & STA_NANO)) {
		/* Make sure the multiplication below won't overflow */
		offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
		offset *= NSEC_PER_USEC;
	}

	/*
	 * Scale the phase adjustment and
	 * clamp to the operating range.
	 */
	offset = clamp(offset, -MAXPHASE, MAXPHASE);

	/*
	 * Select how the frequency is to be controlled
	 * and in which mode (PLL or FLL).
	 */
	secs = (long)(__ktime_get_real_seconds() - time_reftime);
	if (unlikely(time_status & STA_FREQHOLD))
		secs = 0;

	time_reftime = __ktime_get_real_seconds();

	offset64    = offset;
	freq_adj    = ntp_update_offset_fll(offset64, secs);

	/*
	 * Clamp update interval to reduce PLL gain with low
	 * sampling rate (e.g. intermittent network connection)
	 * to avoid instability.
	 */
	if (unlikely(secs > 1 << (SHIFT_PLL + 1 + time_constant)))
		secs = 1 << (SHIFT_PLL + 1 + time_constant);

	freq_adj    += (offset64 * secs) <<
			(NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));

	freq_adj    = min(freq_adj + time_freq, MAXFREQ_SCALED);

	time_freq   = max(freq_adj, -MAXFREQ_SCALED);

	time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}

/**
 * ntp_clear - Clears the NTP state variables
 */
void ntp_clear(void)
{
	time_adjust	= 0;		/* stop active adjtime() */
	time_status	|= STA_UNSYNC;
	time_maxerror	= NTP_PHASE_LIMIT;
	time_esterror	= NTP_PHASE_LIMIT;

	ntp_update_frequency();

	tick_length	= tick_length_base;
	time_offset	= 0;

	ntp_next_leap_sec = TIME64_MAX;
	/* Clear PPS state variables */
	pps_clear();
}


u64 ntp_tick_length(void)
{
	return tick_length;
}

/**
 * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
 *
 * Provides the time of the next leapsecond against CLOCK_REALTIME in
 * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending.
 */
ktime_t ntp_get_next_leap(void)
{
	ktime_t ret;

	if ((time_state == TIME_INS) && (time_status & STA_INS))
		return ktime_set(ntp_next_leap_sec, 0);
	ret = KTIME_MAX;
	return ret;
}

/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 * Also handles leap second processing, and returns leap offset
 */
int second_overflow(time64_t secs)
{
	s64 delta;
	int leap = 0;
	s32 rem;

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second.
	 */
	switch (time_state) {
	case TIME_OK:
		if (time_status & STA_INS) {
			time_state = TIME_INS;
			div_s64_rem(secs, SECS_PER_DAY, &rem);
			ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
		} else if (time_status & STA_DEL) {
			time_state = TIME_DEL;
			div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
			ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
		}
		break;
	case TIME_INS:
		if (!(time_status & STA_INS)) {
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_OK;
		} else if (secs == ntp_next_leap_sec) {
			leap = -1;
			time_state = TIME_OOP;
			printk(KERN_NOTICE
				"Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if (!(time_status & STA_DEL)) {
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_OK;
		} else if (secs == ntp_next_leap_sec) {
			leap = 1;
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_WAIT;
			printk(KERN_NOTICE
				"Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		ntp_next_leap_sec = TIME64_MAX;
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
		break;
	}


	/* Bump the maxerror field */
	time_maxerror += MAXFREQ / NSEC_PER_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/* Compute the phase adjustment for the next second */
	tick_length	 = tick_length_base;

	delta		 = ntp_offset_chunk(time_offset);
	time_offset	-= delta;
	tick_length	+= delta;

	/* Check PPS signal */
	pps_dec_valid();

	if (!time_adjust)
		goto out;

	if (time_adjust > MAX_TICKADJ) {
		time_adjust -= MAX_TICKADJ;
		tick_length += MAX_TICKADJ_SCALED;
		goto out;
	}

	if (time_adjust < -MAX_TICKADJ) {
		time_adjust += MAX_TICKADJ;
		tick_length -= MAX_TICKADJ_SCALED;
		goto out;
	}

	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
							 << NTP_SCALE_SHIFT;
	time_adjust = 0;

out:
	return leap;
}
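
/*
 * Example timeline (illustrative): with STA_INS set mid-day, ntp_next_leap_sec
 * is rounded up to the following midnight UTC. When second_overflow() runs for
 * that second it returns -1, timekeeping steps the clock back so 23:59:59
 * repeats, and the state machine moves TIME_INS -> TIME_OOP -> TIME_WAIT ->
 * TIME_OK as the daemon clears STA_INS.
 */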

#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_hw_clock(struct work_struct *work);
static DECLARE_WORK(sync_work, sync_hw_clock);
static struct hrtimer sync_hrtimer;
#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)

static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
	queue_work(system_freezable_power_efficient_wq, &sync_work);

	return HRTIMER_NORESTART;
}

static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
{
	ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);

	if (retry)
		exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
	else
		exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);

	hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
}

/*
 * Check whether @now is correct versus the required time to update the RTC
 * and calculate the value which needs to be written to the RTC so that the
 * next seconds increment of the RTC after the write is aligned with the next
 * seconds increment of clock REALTIME.
 *
 * tsched     t1 write(t2.tv_sec - 1sec)	t2 RTC increments seconds
 *
 * t2.tv_nsec == 0
 * tsched = t2 - set_offset_nsec
 * newval = t2 - NSEC_PER_SEC
 *
 * ==> newval = tsched + set_offset_nsec - NSEC_PER_SEC
 *
 * As the execution of this code is not guaranteed to happen exactly at
 * tsched this allows it to happen within a fuzzy region:
 *
 *	abs(now - tsched) < FUZZ
 *
 * If @now is not inside the allowed window the function returns false.
 */
static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
				  struct timespec64 *to_set,
				  const struct timespec64 *now)
{
	/* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
	const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
	struct timespec64 delay = {.tv_sec = -1,
				   .tv_nsec = set_offset_nsec};

	*to_set = timespec64_add(*now, delay);

	if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
		to_set->tv_nsec = 0;
		return true;
	}

	if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
		to_set->tv_sec++;
		to_set->tv_nsec = 0;
		return true;
	}
	return false;
}
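
/*
 * Worked example (illustrative, with set_offset_nsec == NSEC_PER_SEC / 2):
 * for now == { 1000, 500000100 }, adding delay == { -1, 500000000 }
 * normalizes to { 1000, 100 }; tv_nsec is within the fuzz, so *to_set becomes
 * exactly 1000 s and the function returns true. A call arriving 500 ms early
 * would instead normalize to tv_nsec near NSEC_PER_SEC / 2 and return false.
 */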

#ifdef CONFIG_GENERIC_CMOS_UPDATE
int __weak update_persistent_clock64(struct timespec64 now64)
{
	return -ENODEV;
}
#else
static inline int update_persistent_clock64(struct timespec64 now64)
{
	return -ENODEV;
}
#endif

#ifdef CONFIG_RTC_SYSTOHC
/* Save NTP synchronized time to the RTC */
static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
	struct rtc_device *rtc;
	struct rtc_time tm;
	int err = -ENODEV;

	rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
	if (!rtc)
		return -ENODEV;

	if (!rtc->ops || !rtc->ops->set_time)
		goto out_close;

	/* First call might not have the correct offset */
	if (*offset_nsec == rtc->set_offset_nsec) {
		rtc_time64_to_tm(to_set->tv_sec, &tm);
		err = rtc_set_time(rtc, &tm);
	} else {
		/* Store the update offset and let the caller try again */
		*offset_nsec = rtc->set_offset_nsec;
		err = -EAGAIN;
	}
out_close:
	rtc_class_close(rtc);
	return err;
}
#else
static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
	return -ENODEV;
}
#endif

/*
 * If we have an externally synchronized Linux clock, then update the RTC
 * accordingly every ~11 minutes. Generally RTCs can only store second
 * precision, but many RTCs will adjust the phase of their second tick to
 * match the moment of update. This infrastructure arranges for the RTC to
 * be set at the correct moment so that its second tick is phase
 * synchronized with the kernel clock.
 */
static void sync_hw_clock(struct work_struct *work)
{
	/*
	 * The default synchronization offset is 500ms for the deprecated
	 * update_persistent_clock64() under the assumption that it uses
	 * the infamous CMOS clock (MC146818).
	 */
	static unsigned long offset_nsec = NSEC_PER_SEC / 2;
	struct timespec64 now, to_set;
	int res = -EAGAIN;

	/*
	 * Don't update if STA_UNSYNC is set and if ntp_notify_cmos_timer()
	 * managed to schedule the work between the timer firing and the
	 * work being able to rearm the timer. Wait for the timer to expire.
	 */
	if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
		return;

	ktime_get_real_ts64(&now);
	/* If @now is not in the allowed window, try again */
	if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
		goto rearm;

	/* Take timezone adjusted RTCs into account */
	if (persistent_clock_is_local)
		to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);

	/* Try the legacy RTC first. */
	res = update_persistent_clock64(to_set);
	if (res != -ENODEV)
		goto rearm;

	/* Try the RTC class */
	res = update_rtc(&to_set, &offset_nsec);
	if (res == -ENODEV)
		return;
rearm:
	sched_sync_hw_clock(offset_nsec, res != 0);
}

void ntp_notify_cmos_timer(void)
{
	/*
	 * If the work is currently executing but has not yet rearmed the
	 * timer, this queues the work again immediately. Not a big issue,
	 * just a pointless extra work item.
	 */
	if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
		queue_work(system_freezable_power_efficient_wq, &sync_work);
}

static void __init ntp_init_cmos_sync(void)
{
	hrtimer_init(&sync_hrtimer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	sync_hrtimer.function = sync_timer_callback;
}
#else /* !(CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC) */
static inline void __init ntp_init_cmos_sync(void) { }
#endif /* !(CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC) */

/*
 * Propagate a new txc->status value into the NTP state:
 */
static inline void process_adj_status(const struct __kernel_timex *txc)
{
	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
		time_state = TIME_OK;
		time_status = STA_UNSYNC;
		ntp_next_leap_sec = TIME64_MAX;
		/* restart PPS frequency calibration */
		pps_reset_freq_interval();
	}

	/*
	 * If we turn on PLL adjustments then reset the
	 * reference time to current time.
	 */
	if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
		time_reftime = __ktime_get_real_seconds();

	/* only set allowed bits */
	time_status &= STA_RONLY;
	time_status |= txc->status & ~STA_RONLY;
}


static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
					  s32 *time_tai)
{
	if (txc->modes & ADJ_STATUS)
		process_adj_status(txc);

	if (txc->modes & ADJ_NANO)
		time_status |= STA_NANO;

	if (txc->modes & ADJ_MICRO)
		time_status &= ~STA_NANO;

	if (txc->modes & ADJ_FREQUENCY) {
		time_freq = txc->freq * PPM_SCALE;
		time_freq = min(time_freq, MAXFREQ_SCALED);
		time_freq = max(time_freq, -MAXFREQ_SCALED);
		/* update pps_freq */
		pps_set_freq(time_freq);
	}

	if (txc->modes & ADJ_MAXERROR)
		time_maxerror = txc->maxerror;

	if (txc->modes & ADJ_ESTERROR)
		time_esterror = txc->esterror;

	if (txc->modes & ADJ_TIMECONST) {
		time_constant = txc->constant;
		if (!(time_status & STA_NANO))
			time_constant += 4;
		time_constant = min(time_constant, (long)MAXTC);
		time_constant = max(time_constant, 0l);
	}

	if (txc->modes & ADJ_TAI &&
			txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
		*time_tai = txc->constant;

	if (txc->modes & ADJ_OFFSET)
		ntp_update_offset(txc->offset);

	if (txc->modes & ADJ_TICK)
		tick_usec = txc->tick;

	if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
		ntp_update_frequency();
}


/*
 * adjtimex mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. used by xntpd.
 */
int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
		  s32 *time_tai, struct audit_ntp_data *ad)
{
	int result;

	if (txc->modes & ADJ_ADJTIME) {
		long save_adjust = time_adjust;

		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
			/* adjtime() is independent from ntp_adjtime() */
			time_adjust = txc->offset;
			ntp_update_frequency();

			audit_ntp_set_old(ad, AUDIT_NTP_ADJUST,	save_adjust);
			audit_ntp_set_new(ad, AUDIT_NTP_ADJUST,	time_adjust);
		}
		txc->offset = save_adjust;
	} else {
		/* If there are input parameters, then process them: */
		if (txc->modes) {
			audit_ntp_set_old(ad, AUDIT_NTP_OFFSET,	time_offset);
			audit_ntp_set_old(ad, AUDIT_NTP_FREQ,	time_freq);
			audit_ntp_set_old(ad, AUDIT_NTP_STATUS,	time_status);
			audit_ntp_set_old(ad, AUDIT_NTP_TAI,	*time_tai);
			audit_ntp_set_old(ad, AUDIT_NTP_TICK,	tick_usec);

			process_adjtimex_modes(txc, time_tai);

			audit_ntp_set_new(ad, AUDIT_NTP_OFFSET,	time_offset);
			audit_ntp_set_new(ad, AUDIT_NTP_FREQ,	time_freq);
			audit_ntp_set_new(ad, AUDIT_NTP_STATUS,	time_status);
			audit_ntp_set_new(ad, AUDIT_NTP_TAI,	*time_tai);
			audit_ntp_set_new(ad, AUDIT_NTP_TICK,	tick_usec);
		}

		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
				  NTP_SCALE_SHIFT);
		if (!(time_status & STA_NANO))
			txc->offset = (u32)txc->offset / NSEC_PER_USEC;
	}

	result = time_state;	/* mostly `TIME_OK' */
	/* check for errors */
	if (is_error_status(time_status))
		result = TIME_ERROR;

	txc->freq	   = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
					 PPM_SCALE_INV, NTP_SCALE_SHIFT);
	txc->maxerror	   = time_maxerror;
	txc->esterror	   = time_esterror;
	txc->status	   = time_status;
	txc->constant	   = time_constant;
	txc->precision	   = 1;
	txc->tolerance	   = MAXFREQ_SCALED / PPM_SCALE;
	txc->tick	   = tick_usec;
	txc->tai	   = *time_tai;

	/* fill PPS status fields */
	pps_fill_timex(txc);

	txc->time.tv_sec = ts->tv_sec;
	txc->time.tv_usec = ts->tv_nsec;
	if (!(time_status & STA_NANO))
		txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;

	/* Handle leapsec adjustments */
	if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {
		if ((time_state == TIME_INS) && (time_status & STA_INS)) {
			result = TIME_OOP;
			txc->tai++;
			txc->time.tv_sec--;
		}
		if ((time_state == TIME_DEL) && (time_status & STA_DEL)) {
			result = TIME_WAIT;
			txc->tai--;
			txc->time.tv_sec++;
		}
		if ((time_state == TIME_OOP) &&
					(ts->tv_sec == ntp_next_leap_sec)) {
			result = TIME_WAIT;
		}
	}

	return result;
}
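
/*
 * Illustrative userspace counterpart (a sketch, not part of this file): the
 * fields filled above surface through the adjtimex(2) wrapper declared in
 * <sys/timex.h>:
 *
 *	struct timex txc = { .modes = 0 };	// read-only query
 *	int state = adjtimex(&txc);		// TIME_OK, TIME_ERROR, ...
 *	// txc.offset, txc.freq and txc.status mirror the values set here
 */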

#ifdef	CONFIG_NTP_PPS

/* actually struct pps_normtime is good old struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
struct pps_normtime {
	s64		sec;	/* seconds */
	long		nsec;	/* nanoseconds */
};

/* normalize the timestamp so that nsec is in the
   ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
{
	struct pps_normtime norm = {
		.sec = ts.tv_sec,
		.nsec = ts.tv_nsec
	};

	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
		norm.nsec -= NSEC_PER_SEC;
		norm.sec++;
	}

	return norm;
}
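
/*
 * Example (illustrative): ts == { 5, 700000000 } normalizes to
 * { .sec = 6, .nsec = -300000000 }, so a pulse arriving just before a second
 * boundary is attributed to that boundary with a small negative phase error
 * rather than to the previous second with a near-1s error.
 */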

/* get current phase correction and jitter */
static inline long pps_phase_filter_get(long *jitter)
{
	*jitter = pps_tf[0] - pps_tf[1];
	if (*jitter < 0)
		*jitter = -*jitter;

	/* TODO: test various filters */
	return pps_tf[0];
}

/* add the sample to the phase filter */
static inline void pps_phase_filter_add(long err)
{
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = err;
}

/* decrease frequency calibration interval length.
 * It is halved after four consecutive unstable intervals.
 */
static inline void pps_dec_freq_interval(void)
{
	if (--pps_intcnt <= -PPS_INTCOUNT) {
		pps_intcnt = -PPS_INTCOUNT;
		if (pps_shift > PPS_INTMIN) {
			pps_shift--;
			pps_intcnt = 0;
		}
	}
}

/* increase frequency calibration interval length.
 * It is doubled after four consecutive stable intervals.
 */
static inline void pps_inc_freq_interval(void)
{
	if (++pps_intcnt >= PPS_INTCOUNT) {
		pps_intcnt = PPS_INTCOUNT;
		if (pps_shift < PPS_INTMAX) {
			pps_shift++;
			pps_intcnt = 0;
		}
	}
}

/* update clock frequency based on MONOTONIC_RAW clock PPS signal
 * timestamps
 *
 * At the end of the calibration interval the difference between the
 * first and last MONOTONIC_RAW clock timestamps divided by the length
 * of the interval becomes the frequency update. If the interval was
 * too long, the data are discarded.
 * Returns the difference between old and new frequency values.
 */
static long hardpps_update_freq(struct pps_normtime freq_norm)
{
	long delta, delta_mod;
	s64 ftemp;

	/* check if the frequency interval was too long */
	if (freq_norm.sec > (2 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		pps_dec_freq_interval();
		printk_deferred(KERN_ERR
			"hardpps: PPSERROR: interval too long - %lld s\n",
			freq_norm.sec);
		return 0;
	}

	/* here the raw frequency offset and wander (stability) are
	 * calculated. If the wander is less than the wander threshold
	 * the interval is increased; otherwise it is decreased.
	 */
	ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
			freq_norm.sec);
	delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
	pps_freq = ftemp;
	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
		printk_deferred(KERN_WARNING
				"hardpps: PPSWANDER: change=%ld\n", delta);
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
		pps_dec_freq_interval();
	} else {	/* good sample */
		pps_inc_freq_interval();
	}

	/* the stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring
	 */
	delta_mod = delta;
	if (delta_mod < 0)
		delta_mod = -delta_mod;
	pps_stabil += (div_s64(((s64)delta_mod) <<
				(NTP_SCALE_SHIFT - SHIFT_USEC),
				NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;

	/* if enabled, the system clock frequency is updated */
	if ((time_status & STA_PPSFREQ) != 0 &&
	    (time_status & STA_FREQHOLD) == 0) {
		time_freq = pps_freq;
		ntp_update_frequency();
	}

	return delta;
}

/* correct REALTIME clock phase error against PPS signal */
static void hardpps_update_phase(long error)
{
	long correction = -error;
	long jitter;

	/* add the sample to the median filter */
	pps_phase_filter_add(correction);
	correction = pps_phase_filter_get(&jitter);

	/* Nominal jitter is due to PPS signal noise. If it exceeds the
	 * threshold, the sample is discarded; otherwise, if so enabled,
	 * the time offset is updated.
	 */
	if (jitter > (pps_jitter << PPS_POPCORN)) {
		printk_deferred(KERN_WARNING
				"hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
				jitter, (pps_jitter << PPS_POPCORN));
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		/* correct the time using the phase offset */
		time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
				NTP_INTERVAL_FREQ);
		/* cancel running adjtime() */
		time_adjust = 0;
	}
	/* update jitter */
	pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
}

/*
 * __hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS signal arrival in order to
 * discipline the CPU clock oscillator to the PPS signal. It takes two
 * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
 * is used to correct clock phase error and the latter is used to
 * correct the frequency.
 *
 * This code is based on David Mills's reference nanokernel
 * implementation. It was mostly rewritten but keeps the same idea.
 */
void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	struct pps_normtime pts_norm, freq_norm;

	pts_norm = pps_normalize_ts(*phase_ts);

	/* clear the error bits, they will be set again if needed */
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);

	/* indicate signal presence */
	time_status |= STA_PPSSIGNAL;
	pps_valid = PPS_VALID;

	/* when called for the first time,
	 * just start the frequency interval */
	if (unlikely(pps_fbase.tv_sec == 0)) {
		pps_fbase = *raw_ts;
		return;
	}

	/* ok, now we have a base for frequency calculation */
	freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));

	/* check that the signal is in the range
	 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
	if ((freq_norm.sec == 0) ||
			(freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
			(freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
		time_status |= STA_PPSJITTER;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
		printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
		return;
	}

	/* signal is ok */

	/* check if the current frequency interval is finished */
	if (freq_norm.sec >= (1 << pps_shift)) {
		pps_calcnt++;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
		hardpps_update_freq(freq_norm);
	}

	hardpps_update_phase(pts_norm.nsec);
}
#endif	/* CONFIG_NTP_PPS */

static int __init ntp_tick_adj_setup(char *str)
{
	int rc = kstrtos64(str, 0, &ntp_tick_adj);
	if (rc)
		return rc;

	ntp_tick_adj <<= NTP_SCALE_SHIFT;
	return 1;
}

__setup("ntp_tick_adj=", ntp_tick_adj_setup);
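
/*
 * Usage example (hypothetical value): booting with "ntp_tick_adj=500" makes
 * every NTP second 500 ns longer (about 0.5 ppm) once ntp_update_frequency()
 * runs; kstrtos64() also accepts negative values.
 */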

void __init ntp_init(void)
{
	ntp_clear();
	ntp_init_cmos_sync();
}