cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cevt-r4k.c (8765B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/time.h>
#include <asm/cevt-r4k.h>

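/*
 * Program CP0_Compare to fire "delta" Count cycles from now.  Returns
 * -ETIME if Count had already raced past the new Compare value by the
 * time it was written, so the clockevents core can retry with a larger
 * delta.
 */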
static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
	return res;
}

/**
 * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
 *
 * Running under virtualisation can introduce overhead into mips_next_event() in
 * the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
 * potentially with an unnatural frequency, which makes a fixed min_delta_ns
 * value inappropriate as it may be too small.
 *
 * It can also introduce occasional latency from the guest being descheduled.
 *
 * This function calculates a good minimum delta based roughly on the 75th
 * percentile of the time taken to do the mips_next_event() sequence, in order
 * to handle potentially higher overhead while also eliminating outliers due to
 * unpredictable hypervisor latency (which can be handled by retries).
 *
 * Return:	An appropriate minimum delta for the clock event device.
 */
static unsigned int calculate_min_delta(void)
{
	unsigned int cnt, i, j, k, l;
	unsigned int buf1[4], buf2[3];
	unsigned int min_delta;

	/*
	 * Calculate the median of 5 75th percentiles of 5 samples of how long
	 * it takes to set CP0_Compare = CP0_Count + delta.
	 */
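	/*
	 * buf1 keeps the four smallest of the five samples in ascending
	 * order, so buf1[3] ends up holding the 75th percentile of each
	 * batch; buf2 collects those percentiles the same way, leaving
	 * their median in buf2[2].
	 */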
	for (i = 0; i < 5; ++i) {
		for (j = 0; j < 5; ++j) {
			/*
			 * This is like the code in mips_next_event(), and
			 * directly measures the borderline "safe" delta.
			 */
			cnt = read_c0_count();
			write_c0_compare(cnt);
			cnt = read_c0_count() - cnt;

			/* Sorted insert into buf1 */
			for (k = 0; k < j; ++k) {
				if (cnt < buf1[k]) {
					l = min_t(unsigned int,
						  j, ARRAY_SIZE(buf1) - 1);
					for (; l > k; --l)
						buf1[l] = buf1[l - 1];
					break;
				}
			}
			if (k < ARRAY_SIZE(buf1))
				buf1[k] = cnt;
		}

		/* Sorted insert of 75th percentile into buf2 */
		for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
			if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
				l = min_t(unsigned int,
					  i, ARRAY_SIZE(buf2) - 1);
				for (; l > k; --l)
					buf2[l] = buf2[l - 1];
				break;
			}
		}
		if (k < ARRAY_SIZE(buf2))
			buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
	}

	/* Use 2 * median of 75th percentiles */
	min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;

	/* Don't go too low */
	if (min_delta < 0x300)
		min_delta = 0x300;

	pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
		 __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
	return min_delta;
}

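/*
 * One clock event device per CPU; the CP0 timer IRQ itself is requested
 * only once, tracked by cp0_timer_irq_installed.
 */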
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked
 */
static inline int handle_perf_irq(int r2)
{
	/*
	 * The performance counter overflow interrupt may be shared with the
	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
	 * and we can't reliably determine if a counter interrupt has also
	 * happened (!r2) then don't check for a timer interrupt.
	 */
	return (cp0_perfcount_irq < 0) &&
		perf_irq() == IRQ_HANDLED &&
		!r2;
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2_r6;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		return IRQ_HANDLED;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	/*
	 * IRQF_SHARED: The timer interrupt may be shared with other interrupts
	 * such as perf counter and FDC interrupts.
	 */
	.flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
	.name = "timer",
};

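/*
 * Placeholder handler: the clockevents core installs the real event
 * handler when the device is set up, so this only avoids calling a NULL
 * pointer if a stray compare interrupt arrives before then.
 */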
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	/* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * Compare interrupt can be routed and latched outside the core,
 * so wait up to the worst-case number of cycle counter ticks for timer
 * interrupt changes to propagate to the cause register.
 */
#define COMPARE_INT_SEEN_TICKS 50

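/*
 * Probe whether the Count/Compare interrupt actually works on this CPU:
 * ack any stale pending interrupt, arm Compare a short (doubling) delta
 * ahead so the write lands before Count catches up, wait for expiry,
 * then check that the interrupt becomes pending and can be acked again.
 */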
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		cnt = read_c0_count();
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
			if (!c0_compare_int_pending())
				break;
		if (c0_compare_int_pending())
			return 0;
	}

	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry */

	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (c0_compare_int_pending())
			break;
	if (!c0_compare_int_pending())
		return 0;
	cnt = read_c0_count();
	write_c0_compare(cnt);
	back_to_back_c0_hazard();
	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (!c0_compare_int_pending())
			break;
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

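/*
 * Weak default; platforms that route the compare interrupt elsewhere
 * (e.g. through a vectored interrupt controller) override this to
 * return the interrupt number of their liking.
 */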
unsigned int __weak get_c0_compare_int(void)
{
	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

#ifdef CONFIG_CPU_FREQ

static unsigned long mips_ref_freq;

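/*
 * CP0_Count ticks at a rate derived from the CPU clock, so after a
 * cpufreq transition rescale mips_hpt_frequency relative to the
 * reference frequency and update each affected CPU's clock event device.
 */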
static int r4k_cpufreq_callback(struct notifier_block *nb,
				unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct clock_event_device *cd;
	unsigned long rate;
	int cpu;

	if (!mips_ref_freq)
		mips_ref_freq = freq->old;

	if (val == CPUFREQ_POSTCHANGE) {
		rate = cpufreq_scale(mips_hpt_frequency, mips_ref_freq,
				     freq->new);

		for_each_cpu(cpu, freq->policy->cpus) {
			cd = &per_cpu(mips_clockevent_device, cpu);

			clockevents_update_freq(cd, rate);
		}
	}

	return 0;
}

static struct notifier_block r4k_cpufreq_notifier = {
	.notifier_call  = r4k_cpufreq_callback,
};

static int __init r4k_register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&r4k_cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(r4k_register_cpufreq_notifier);

#endif /* !CONFIG_CPU_FREQ */

int r4k_clockevent_init(void)
{
	unsigned long flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq, min_delta;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things get platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP |
				  CLOCK_EVT_FEAT_PERCPU;

	min_delta		= calculate_min_delta();

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->event_handler	= mips_event_handler;

	clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

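	/*
	 * IRQF_SHARED requires a non-NULL dev_id, so the handler itself is
	 * passed as a unique cookie in place of a device structure.
	 */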
	if (request_irq(irq, c0_compare_interrupt, flags, "timer",
			c0_compare_interrupt))
		pr_err("Failed to request irq %d (timer)\n", irq);

	return 0;
}