cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

time.c (4288B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Xen stolen ticks accounting.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/static_call.h>

#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/xen-ops.h>

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

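/*
 * Runstate time carried over from before the last suspend, one counter
 * per RUNSTATE_* state; added back into every snapshot so accounting
 * survives save/restore (see xen_manage_runstate_time() below).
 */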
static DEFINE_PER_CPU(u64[4], old_runstate_time);

/* return a consistent snapshot of a 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l, h2;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = READ_ONCE(p32[1]);
			l = READ_ONCE(p32[0]);
			h2 = READ_ONCE(p32[1]);
		} while (h2 != h);

		ret = (((u64)h) << 32) | l;
	} else {
		ret = READ_ONCE(*p);
	}

	return ret;
}
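
/*
 * Worked example for get64() (illustrative note, not part of the
 * original file): on a 32-bit guest a 64-bit load is two 32-bit loads,
 * so a counter that carries into the high word can be observed torn:
 *
 *	counter is		0x00000001ffffffff
 *	reader loads high word:	0x00000001
 *	Xen bumps counter to	0x0000000200000000
 *	reader loads low word:	0x00000000
 *	reader would return	0x0000000100000000, which never existed
 *
 * Rereading the high word catches this: it changed from 0x00000001 to
 * 0x00000002, so the do/while above retries until high is stable.
 */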

static void xen_get_runstate_snapshot_cpu_delta(
			      struct vcpu_runstate_info *res, unsigned int cpu)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = per_cpu_ptr(&xen_runstate, cpu);

	do {
		state_time = get64(&state->state_entry_time);
		rmb();	/* Hypervisor might update data. */
		*res = __READ_ONCE(*state);
		rmb();	/* Hypervisor might update data. */
	} while (get64(&state->state_entry_time) != state_time ||
		 (state_time & XEN_RUNSTATE_UPDATE));
}
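
/*
 * Usage sketch (assumed caller, not part of the original file): the
 * snapshot helpers require preemption to be disabled, hence the
 * BUG_ON(preemptible()) above. A typical reader would look like:
 *
 *	struct vcpu_runstate_info info;
 *
 *	preempt_disable();
 *	xen_get_runstate_snapshot(&info);
 *	preempt_enable();
 *
 * The loop retries both when state_entry_time changed under the copy
 * and while its XEN_RUNSTATE_UPDATE bit is set, i.e. while Xen itself
 * is mid-update of the runstate area.
 */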

static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
					  unsigned int cpu)
{
	int i;

	xen_get_runstate_snapshot_cpu_delta(res, cpu);

	for (i = 0; i < 4; i++)
		res->time[i] += per_cpu(old_runstate_time, cpu)[i];
}

void xen_manage_runstate_time(int action)
{
	static struct vcpu_runstate_info *runstate_delta;
	struct vcpu_runstate_info state;
	int cpu, i;

	switch (action) {
	case -1: /* backup runstate time before suspend */
		if (unlikely(runstate_delta))
			pr_warn_once("%s: memory leak as runstate_delta is not NULL\n",
					__func__);

		runstate_delta = kmalloc_array(num_possible_cpus(),
					sizeof(*runstate_delta),
					GFP_ATOMIC);
		if (unlikely(!runstate_delta)) {
			pr_warn("%s: failed to allocate runstate_delta\n",
					__func__);
			return;
		}

		for_each_possible_cpu(cpu) {
			xen_get_runstate_snapshot_cpu_delta(&state, cpu);
			memcpy(runstate_delta[cpu].time, state.time,
					sizeof(runstate_delta[cpu].time));
		}

		break;

	case 0: /* accumulate the backed-up runstate time after resume */
		if (unlikely(!runstate_delta)) {
			pr_warn("%s: cannot accumulate runstate time as runstate_delta is NULL\n",
					__func__);
			return;
		}

		for_each_possible_cpu(cpu) {
			for (i = 0; i < 4; i++)
				per_cpu(old_runstate_time, cpu)[i] +=
					runstate_delta[cpu].time[i];
		}

		break;

	default: /* do not accumulate runstate time for checkpointing */
		break;
	}

	if (action != -1 && runstate_delta) {
		kfree(runstate_delta);
		runstate_delta = NULL;
	}
}
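
/*
 * Usage sketch (hypothetical call sites, not part of the original
 * file): the suspend path is expected to bracket the downtime:
 *
 *	xen_manage_runstate_time(-1);	back up per-cpu deltas
 *	... domain is saved and later restored ...
 *	xen_manage_runstate_time(0);	fold the backup into
 *					old_runstate_time
 *
 * Any other action value (checkpointing) frees the backup without
 * accumulating it, so the restored counters are taken at face value.
 */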

/*
 * Runstate accounting
 */
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	xen_get_runstate_snapshot_cpu(res, smp_processor_id());
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

u64 xen_steal_clock(int cpu)
{
	struct vcpu_runstate_info state;

	xen_get_runstate_snapshot_cpu(&state, cpu);
	return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
}
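
/*
 * Consumption sketch (assuming the generic paravirt steal-time hook):
 * "stolen" time is time the vCPU was runnable or offline, i.e. wanted
 * to run but had no physical CPU. The scheduler reaches it through the
 * hook installed in xen_time_setup_guest() below:
 *
 *	u64 steal = paravirt_steal_clock(smp_processor_id());
 *
 * which resolves to xen_steal_clock() once the static call is updated.
 */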

void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       xen_vcpu_nr(cpu), &area))
		BUG();
}
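
/*
 * Registration sketch (hypothetical bring-up loop, not part of the
 * original file): the area is per-vCPU, so each CPU must be registered
 * individually, e.g.:
 *
 *	for_each_online_cpu(cpu)
 *		xen_setup_runstate_info(cpu);
 *
 * After the hypercall, Xen writes runstate updates directly into this
 * guest's xen_runstate buffer for that vCPU.
 */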

void __init xen_time_setup_guest(void)
{
	bool xen_runstate_remote;

	xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
					VMASST_TYPE_runstate_update_flag);

	static_call_update(pv_steal_clock, xen_steal_clock);

	static_key_slow_inc(&paravirt_steal_enabled);
	if (xen_runstate_remote)
		static_key_slow_inc(&paravirt_steal_rq_enabled);
}
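
/*
 * Consumption sketch (assuming the x86 paravirt definitions): because
 * pv_steal_clock is a static call, a read such as
 *
 *	u64 t = static_call(pv_steal_clock)(cpu);
 *
 * is patched to call xen_steal_clock() directly, with no indirect
 * branch. paravirt_steal_enabled tells the scheduler steal time is
 * available at all; paravirt_steal_rq_enabled is raised only when Xen
 * accepted the runstate-update flag, since remote (cross-CPU) runstate
 * reads are only reliable with that flag set.
 */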