cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

time.c (12487B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/timekeeper_internal.h>
#include <linux/platform_device.h>
#include <linux/sched/cputime.h>

#include <asm/delay.h>
#include <asm/efi.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>

#include "fsyscall_gtod_data.h"
#include "irq.h"

static u64 itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

static struct clocksource clocksource_itc = {
	.name           = "itc",
	.rating         = 350,
	.read           = itc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(64),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/kernel_stat.h>

extern u64 cycle_to_nsec(u64 cyc);

void vtime_flush(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 delta;

	if (ti->utime)
		account_user_time(tsk, cycle_to_nsec(ti->utime));

	if (ti->gtime)
		account_guest_time(tsk, cycle_to_nsec(ti->gtime));

	if (ti->idle_time)
		account_idle_time(cycle_to_nsec(ti->idle_time));

	if (ti->stime) {
		delta = cycle_to_nsec(ti->stime);
		account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
	}

	if (ti->hardirq_time) {
		delta = cycle_to_nsec(ti->hardirq_time);
		account_system_index_time(tsk, delta, CPUTIME_IRQ);
	}

	if (ti->softirq_time) {
		delta = cycle_to_nsec(ti->softirq_time);
		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
	}

	ti->utime = 0;
	ti->gtime = 0;
	ti->idle_time = 0;
	ti->stime = 0;
	ti->hardirq_time = 0;
	ti->softirq_time = 0;
}

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(current);

	ni->ac_stamp = pi->ac_stamp;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts disabled.
 */
static __u64 vtime_delta(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 now, delta_stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = ia64_get_itc();
	delta_stime = now - ti->ac_stamp;
	ti->ac_stamp = now;

	return delta_stime;
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 stime = vtime_delta(tsk);

	if (tsk->flags & PF_VCPU)
		ti->gtime += stime;
	else
		ti->stime += stime;
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);
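/*
 * Worked example (illustrative numbers): if the previous accounting
 * stamp was ac_stamp = 1000 ITC cycles and ia64_get_itc() now reads
 * 1600, vtime_delta() returns 600 cycles and advances ac_stamp to 1600.
 * vtime_account_kernel() adds those 600 cycles to ti->gtime when the
 * task is running guest code (PF_VCPU set), otherwise to ti->stime;
 * vtime_flush() later converts the accumulated cycles to nanoseconds
 * via cycle_to_nsec() and feeds them to the generic cputime accounting.
 */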

void vtime_account_idle(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	ti->idle_time += vtime_delta(tsk);
}

void vtime_account_softirq(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	ti->softirq_time += vtime_delta(tsk);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	ti->hardirq_time += vtime_delta(tsk);
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	while (1) {
		new_itm += local_cpu_data->itm_delta;

		legacy_timer_tick(smp_processor_id() == time_keeper_id);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
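/*
 * Worked example (illustrative numbers): with itm_delta = 1,000,000 ITC
 * cycles per tick, suppose the catch-up loop above leaves new_itm only
 * 300,000 cycles ahead of the current ITC value.  That is less than
 * itm_delta/2, so the do/while block skips ahead by another full delta
 * (dropping a tick) before programming ITM, leaving at least half a
 * tick of margin between "now" and the next timer interrupt.
 */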

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}
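/*
 * Worked example (assuming ia64_fls() returns the zero-based index of
 * the most significant set bit): for CPUs in the range [hi, 2*hi) the
 * formula above yields odd multiples of delta/(2*hi), so
 *   cpu 1 -> shift = delta/2,
 *   cpu 2 -> delta/4,   cpu 3 -> 3*delta/4,
 *   cpu 4 -> delta/8,   cpu 5 -> 3*delta/8,   cpu 6 -> 5*delta/8, ...
 * which spreads the per-CPU ticks evenly across one tick period.
 */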

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);


void ia64_init_itm(void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
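	/*
	 * Worked example (illustrative numbers): with a 200 MHz platform
	 * base frequency and an ITC ratio of 5/1, itc_freq is 1 GHz.
	 * With HZ = 250 that gives itm_delta = 4,000,000 cycles per tick,
	 * cyc_per_usec = 1000, and nsec_per_cyc of roughly 1.0 ns/cycle
	 * expressed as a fixed-point value scaled by 2^IA64_NSEC_PER_CYC_SHIFT.
	 */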

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing. If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers. Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off! There is no point in
		 * synchronizing the ITCs since the differentials between them
		 * may be large and change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs. Until then we have to avoid using the ITC.
		 */
		clocksource_itc.rating = 50;

	/* avoid a softlockup message when a CPU is unplugged and plugged again. */
	touch_softlockup_watchdog();

	/* Set up the CPU-local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		clocksource_register_hz(&clocksource_itc,
						local_cpu_data->itc_freq);
		itc_clocksource = &clocksource_itc;
	}
}

static u64 itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment, a CPU can lose the cmpxchg race; in that
	 * case cmpxchg returns the value that the winner of the race
	 * stored, so use that newer value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}
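/*
 * Worked example (illustrative numbers): CPU A reads
 * itc_lastcycle = 100 and get_cycles() = 105, while CPU B concurrently
 * updates itc_lastcycle to 107.  CPU A's cmpxchg then fails and returns
 * 107, which is what CPU A hands back to the caller, so this read path
 * avoids returning a value smaller than one already returned on another
 * CPU even though the raw ITCs disagree.
 */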

void read_persistent_clock64(struct timespec64 *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, timer_interrupt, IRQF_IRQPOLL,
			    "timer");
	ia64_init_itm();
}

/*
 * Generic udelay assumes that, if preemption is allowed and the thread
 * migrates to another CPU, the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timekeeper *tk)
{
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy vsyscall data */
	fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
	fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
	fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
	fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;

	fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
	fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;

	fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
					      + tk->wall_to_monotonic.tv_sec;
	fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
						+ ((u64)tk->wall_to_monotonic.tv_nsec
							<< tk->tkr_mono.shift);

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.snsec >=
					(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		fsyscall_gtod_data.monotonic_time.snsec -=
					((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
		fsyscall_gtod_data.monotonic_time.sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}
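/*
 * Illustrative sketch only (hypothetical reader, not part of this file):
 * the data published above is consumed with the usual seqcount pattern.
 * The real consumer is the fsyscall gettimeofday path (fsys.S), and a
 * full reader would also add the elapsed cycles since clk_cycle_last;
 * this fragment only shows the retry loop.
 */
#if 0
static void example_read_wall_time(struct timespec64 *ts)
{
	unsigned int seq;
	u64 sec, snsec;

	do {
		seq = read_seqcount_begin(&fsyscall_gtod_data.seq);
		sec = fsyscall_gtod_data.wall_time.sec;
		snsec = fsyscall_gtod_data.wall_time.snsec;
	} while (read_seqcount_retry(&fsyscall_gtod_data.seq, seq));

	ts->tv_sec = sec;
	ts->tv_nsec = snsec >> fsyscall_gtod_data.clk_shift;
}
#endif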