cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

multicalls.c (5127B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with storage for data pointed to by
 * the arguments (for passing pointers to structures, etc.).  When the
 * multicall is actually issued, all the space for the commands and
 * allocated memory is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back; failed calls trigger a WARN and are logged,
 * and it is a BUG if the batch hypercall itself cannot be issued.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
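
/*
 * Usage sketch (illustrative only): callers batch hypercalls through
 * the inline helpers in multicalls.h; MULTI_update_va_mapping() below
 * is just one example of the MULTI_* wrappers from
 * <asm/xen/hypercall.h> that fill in a reserved entry.
 *
 *	struct multicall_space mcs = xen_mc_entry(0);
 *
 *	MULTI_update_va_mapping(mcs.mc, vaddr, pte, 0);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * xen_mc_entry() disables interrupts and reserves a slot in the
 * per-cpu buffer; xen_mc_issue() flushes immediately unless the vcpu
 * is in the matching lazy mode, in which case the call stays queued
 * until the next flush.
 */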
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
#include "debugfs.h"

/* Maximum number of multicall entries batched per cpu before a flush. */
#define MC_BATCH	32

/* Set to 1 to keep copies of issued entries and their callers for dumps. */
#define MC_DEBUG	0

/* Size of the per-cpu scratch area for multicall arguments, in bytes. */
#define MC_ARGS		(MC_BATCH * 16)

struct mc_buffer {
	unsigned mcidx, argidx, cbidx;	/* entries, arg bytes, callbacks in use */
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	struct multicall_entry debug[MC_BATCH];
	void *caller[MC_BATCH];
#endif
	unsigned char args[MC_ARGS];
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

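/*
 * Flush all queued multicalls: issue the batch to the hypervisor (a
 * single queued entry is turned into a direct hypercall instead),
 * warn about and log any per-call failures, then run and clear the
 * queued callbacks.  Must be called with preemption disabled;
 * interrupts are masked while the buffer is drained.
 */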
void xen_mc_flush(void)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct multicall_entry *mc;
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);

#if MC_DEBUG
	memcpy(b->debug, b->entries,
	       b->mcidx * sizeof(struct multicall_entry));
#endif

	switch (b->mcidx) {
	case 0:
		/* no-op */
		BUG_ON(b->argidx != 0);
		break;

	case 1:
		/* Singleton multicall - bypass multicall machinery
		   and just do the call directly. */
		mc = &b->entries[0];

		mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1],
					     mc->args[2], mc->args[3],
					     mc->args[4]);
		ret = mc->result < 0;
		break;

	default:
		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;
	}

	if (WARN_ON(ret)) {
		pr_err("%d of %d multicall(s) failed: cpu %d\n",
		       ret, b->mcidx, smp_processor_id());
		for (i = 0; i < b->mcidx; i++) {
			if (b->entries[i].result < 0) {
#if MC_DEBUG
				pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n",
				       i + 1,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result,
				       b->caller[i]);
#else
				pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\n",
				       i + 1,
				       b->entries[i].op,
				       b->entries[i].args[0],
				       b->entries[i].result);
#endif
			}
		}
	}

	b->mcidx = 0;
	b->argidx = 0;

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	local_irq_restore(flags);
}

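/*
 * Reserve the next multicall entry plus @args bytes of argument
 * space, flushing the buffer first if either the entry array or the
 * argument area would overflow.  Returns pointers to the reserved
 * entry and its (u64-aligned) argument space.
 */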
struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct multicall_space ret;
	unsigned argidx = roundup(b->argidx, sizeof(u64));

	trace_xen_mc_entry_alloc(args);

	BUG_ON(preemptible());
	BUG_ON(b->argidx >= MC_ARGS);

	if (unlikely(b->mcidx == MC_BATCH ||
		     (argidx + args) >= MC_ARGS)) {
		trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
					  XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
		xen_mc_flush();
		argidx = roundup(b->argidx, sizeof(u64));
	}

	ret.mc = &b->entries[b->mcidx];
#if MC_DEBUG
	b->caller[b->mcidx] = __builtin_return_address(0);
#endif
	b->mcidx++;
	ret.args = &b->args[argidx];
	b->argidx = argidx + args;

	BUG_ON(b->argidx >= MC_ARGS);
	return ret;
}

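/*
 * Try to extend the most recently queued multicall by @size bytes of
 * extra argument space.  Fails (returning { NULL, NULL }) if the last
 * entry's opcode doesn't match @op or if there is not enough room
 * left, in which case the caller must queue a fresh entry instead.
 */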
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct multicall_space ret = { NULL, NULL };

	BUG_ON(preemptible());
	BUG_ON(b->argidx >= MC_ARGS);

	if (unlikely(b->mcidx == 0 ||
		     b->entries[b->mcidx - 1].op != op)) {
		trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
		goto out;
	}

	if (unlikely((b->argidx + size) >= MC_ARGS)) {
		trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
		goto out;
	}

	ret.mc = &b->entries[b->mcidx - 1];
	ret.args = &b->args[b->argidx];
	b->argidx += size;

	BUG_ON(b->argidx >= MC_ARGS);

	trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
out:
	return ret;
}

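/*
 * Queue a callback to run when the current multicall batch is
 * flushed, flushing first if the callback array is already full.
 */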
void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH) {
		trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK);
		xen_mc_flush();
	}

	trace_xen_mc_callback(fn, data);

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}