cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sched_clock.c (7899B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_latch_t	seq;
	struct clock_read_data	read_data[2];
	ktime_t			wrap_kt;
	unsigned long		rate;

	u64 (*actual_read_sched_clock)(void);
};
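
/*
 * Illustrative sketch (not part of the upstream file): with lockdep disabled,
 * struct clock_read_data is 40 bytes (three u64s, one function pointer, two
 * u32s) and the latch seqcount is a single word, so 'seq' plus 'read_data[0]'
 * add up to well under the 64-byte cache line mentioned above. A hypothetical
 * build-time check of that assumption could look like:
 *
 *	static_assert(offsetof(struct clock_data, read_data[1]) <= 64);
 */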

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};
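
/*
 * Illustrative sketch (not part of the upstream file): with a hypothetical
 * HZ of 250, the default jiffy-based clock above uses
 * mult = NSEC_PER_SEC / HZ = 4000000 and shift = 0, so sched_clock()
 * advances in coarse 4 ms steps until a real counter is registered.
 */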

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
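
/*
 * Illustrative sketch (not part of the upstream file): a hypothetical 1 MHz
 * counter could use mult = 1000 and shift = 0, so 2000 ticks convert to
 * 2000 * 1000 >> 0 = 2000000 ns (2 ms). Real pairs come from
 * clocks_calc_mult_shift(), which picks a larger shift for precision.
 */
static u64 __maybe_unused cyc_to_ns_example(void)
{
	return cyc_to_ns(2000, 1000, 0);	/* 2000000 ns */
}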

notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
	*seq = raw_read_seqcount_latch(&cd.seq);
	return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
	return read_seqcount_latch_retry(&cd.seq, seq);
}

unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned int seq;
	struct clock_read_data *rd;

	do {
		rd = sched_clock_read_begin(&seq);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (sched_clock_read_retry(seq));

	return res;
}
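
/*
 * Illustrative usage sketch (not part of the upstream file): sched_clock()
 * returns monotonically increasing nanoseconds, so a caller can bracket a
 * region to get a rough duration.
 */
static void __maybe_unused sched_clock_usage_example(void)
{
	u64 t0, t1;

	t0 = sched_clock();
	/* ... the work being timed ... */
	t1 = sched_clock();
	pr_debug("timed section took %llu ns\n", t1 - t0);
}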

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}
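
/*
 * Illustrative interleaving sketch (not part of the upstream file), showing
 * why a reader never sees a half-updated copy:
 *
 *	writer (above)			NMI reader (sched_clock())
 *	--------------			--------------------------
 *	copy new data into [1]
 *	latch: seq++ (now odd)
 *					seq & 1 == 1 -> reads [1], new data
 *	copy new data into [0]
 *	latch: seq++ (now even)
 *					seq & 1 == 0 -> reads [0], new data
 *
 * A reader that started on [0] before the first latch notices the sequence
 * change via sched_clock_read_retry() and simply reads again.
 */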

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}
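
/*
 * Illustrative sketch (not part of the upstream file): a hypothetical 32-bit
 * counter running at 1 MHz can only express 2^32 microseconds (roughly 71
 * minutes) before the masked delta in sched_clock() wraps. cd.wrap_kt is
 * derived from that limit at registration time, so this timer re-bases
 * epoch_cyc/epoch_ns comfortably before a wrap could be observed.
 */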

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock	= read;
	rd.sched_clock_mask	= new_mask;
	rd.mult			= new_mult;
	rd.shift		= new_shift;
	rd.epoch_cyc		= new_epoch;
	rd.epoch_ns		= ns;

	update_clock_read_data(&rd);

	if (sched_clock_timer.function != NULL) {
		/* update timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
			      HRTIMER_MODE_REL_HARD);
	}

	r = rate;
	if (r >= 4000000) {
		r = DIV_ROUND_CLOSEST(r, 1000000);
		r_unit = 'M';
	} else if (r >= 4000) {
		r = DIV_ROUND_CLOSEST(r, 1000);
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}
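
/*
 * Illustrative registration sketch (not part of the upstream file): a
 * hypothetical driver for a free-running 32-bit counter at 24 MHz would hook
 * in roughly as follows. my_timer_base and MY_TIMER_CNT are made-up names,
 * and readl_relaxed() would need <linux/io.h>, which this file does not
 * include:
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return readl_relaxed(my_timer_base + MY_TIMER_CNT);
 *	}
 *
 *	static void __init my_timer_init(void)
 *	{
 *		sched_clock_register(my_timer_read, 32, 24000000);
 *	}
 */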

void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided by this point,
	 * make the jiffy clock the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = raw_read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}
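
/*
 * Illustrative sketch (not part of the upstream file): once the function
 * above is installed, sched_clock() computes (epoch_cyc - epoch_cyc) & mask,
 * which is 0, so it keeps returning the epoch_ns captured at suspend time for
 * as long as the system stays suspended.
 */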

int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
	.suspend	= sched_clock_suspend,
	.resume		= sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);