cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

events_2l.c (11013B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Xen event channels (2-level ABI)
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "events_internal.h"

/*
 * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
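/*
 * For example, 32-bit Arm guests use a 64-bit xen_ulong_t, making
 * BITS_PER_EVTCHN_WORD 64 while BITS_PER_LONG is only 32.
 */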
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

#define EVTCHN_MASK_SIZE (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)

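/*
 * Per-CPU filter: bit N is set iff event channel N is currently bound
 * to this CPU. With a 64-bit xen_ulong_t, EVTCHN_2L_NR_CHANNELS is
 * 4096, so each mask below is 64 words long.
 */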
static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_MASK_SIZE], cpu_evtchn_mask);

static unsigned evtchn_2l_max_channels(void)
{
	return EVTCHN_2L_NR_CHANNELS;
}

static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
				  unsigned int old_cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));
	set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

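/*
 * The global pending and mask bitmaps live in the shared info page,
 * which Xen and other vCPUs write concurrently; hence the sync_*
 * (atomic) bitops in the accessors below.
 */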
static void evtchn_2l_clear_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_set_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_is_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

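/*
 * Clear the mask bit for @port and make sure a pending event is then
 * actually delivered, either by re-raising it locally (fast path) or
 * via the EVTCHNOP_unmask hypercall (slow path, see below).
 */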
static void evtchn_2l_unmask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();
	int do_hypercall = 0, evtchn_pending = 0;

	BUG_ON(!irqs_disabled());

	smp_wmb();	/* All writes before unmask must be visible. */

	if (unlikely((cpu != cpu_from_evtchn(port))))
		do_hypercall = 1;
	else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall is needed
		 * remask the event.
		 */
		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

		if (unlikely(evtchn_pending && xen_hvm_domain())) {
			sync_set_bit(port, BM(&s->evtchn_mask[0]));
			do_hypercall = 1;
		}
	}

	/* Slow path (hypercall) if this is a non-local port or if this is
	 * an hvm domain and an event is pending (hvm domains don't have
	 * their own implementation of irq_enable). */
	if (do_hypercall) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (evtchn_pending &&
		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
					   BM(&vcpu_info->evtchn_pending_sel)))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

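/*
 * Position at which the previous scan in evtchn_2l_handle_events()
 * stopped; the next scan resumes just after it, so a busy low-numbered
 * port cannot starve the higher-numbered ones.
 */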
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
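/* e.g. MASK_LSBS(0b1011, 2) == 0b1000: bits 0 and 1 are cleared. */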

static inline xen_ulong_t active_evtchns(unsigned int cpu,
					 struct shared_info *sh,
					 unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq and hand it to the interrupt core for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
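/*
 * Worked example, assuming a 64-bit xen_ulong_t: pending port 100 sets
 * bit 100 % 64 = 36 in second-level word 100 / 64 = 1, and bit 1 in
 * the first-level selector (evtchn_pending_sel).
 */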
static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	xen_ulong_t pending_words;
	xen_ulong_t pending_bits;
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

	/* Timer interrupt has highest priority. */
	irq = irq_from_virq(cpu, VIRQ_TIMER);
	if (irq != -1) {
		evtchn_port_t evtchn = evtchn_from_irq(irq);
		word_idx = evtchn / BITS_PER_LONG;
		bit_idx = evtchn % BITS_PER_LONG;
		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
			generic_handle_irq(irq);
	}

	/*
	 * Master flag must be cleared /before/ clearing
	 * selector flag. xchg_xen_ulong must contain an
	 * appropriate barrier.
	 */
	pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

	start_word_idx = __this_cpu_read(current_word_idx);
	start_bit_idx = __this_cpu_read(current_bit_idx);

	word_idx = start_word_idx;

	for (i = 0; pending_words != 0; i++) {
		xen_ulong_t words;

		words = MASK_LSBS(pending_words, word_idx);

		/*
		 * If we masked out all events, wrap to beginning.
		 */
		if (words == 0) {
			word_idx = 0;
			bit_idx = 0;
			continue;
		}
		word_idx = EVTCHN_FIRST_BIT(words);

		pending_bits = active_evtchns(cpu, s, word_idx);
		bit_idx = 0; /* usually scan entire word from start */
		/*
		 * We scan the starting word in two parts.
		 *
		 * 1st time: start in the middle, scanning the
		 * upper bits.
		 *
		 * 2nd time: scan the whole word (not just the
		 * parts skipped in the first pass) -- if an
		 * event in the previously scanned bits is
		 * pending again it would just be scanned on
		 * the next loop anyway.
		 */
		if (word_idx == start_word_idx) {
			if (i == 0)
				bit_idx = start_bit_idx;
		}

		do {
			xen_ulong_t bits;
			evtchn_port_t port;

			bits = MASK_LSBS(pending_bits, bit_idx);

			/* If we masked out all events, move on. */
			if (bits == 0)
				break;

			bit_idx = EVTCHN_FIRST_BIT(bits);

			/* Process port. */
			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
			handle_irq_for_port(port, ctrl);

			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

			/* Next caller starts at last processed + 1 */
			__this_cpu_write(current_word_idx,
					 bit_idx ? word_idx :
					 (word_idx+1) % BITS_PER_EVTCHN_WORD);
			__this_cpu_write(current_bit_idx, bit_idx);
		} while (bit_idx != 0);

		/* Scan start_word_idx twice; all others once. */
		if ((word_idx != start_word_idx) || (i != 0))
			pending_words &= ~(1UL << word_idx);

		word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
	}
}

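/*
 * Dump the state of all event channels (per-vCPU selectors, global
 * pending/mask bitmaps and the local per-CPU filters) to the kernel
 * log. Wired up to VIRQ_DEBUG by the common event-channel code.
 */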
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int masked;
		v = per_cpu(xen_vcpu, i);
		masked = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n  ", i,
		       masked, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		xen_ulong_t pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
			int word_idx = i / BITS_PER_EVTCHN_WORD;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       get_evtchn_to_irq(i),
			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
			       ? "" : " l2-clear",
			       !sync_test_bit(i, BM(sh->evtchn_mask))
			       ? "" : " globally-masked",
			       sync_test_bit(i, BM(cpu_evtchn))
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

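/*
 * On resume every event channel is rebound from scratch, so the stale
 * per-CPU filters can simply be cleared.
 */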
static void evtchn_2l_resume(void)
{
	int i;

	for_each_online_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
}

static int evtchn_2l_percpu_deinit(unsigned int cpu)
{
	memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);

	return 0;
}

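/* Hook the 2-level primitives into the generic event-channel layer. */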
static const struct evtchn_ops evtchn_ops_2l = {
	.max_channels      = evtchn_2l_max_channels,
	.nr_channels       = evtchn_2l_max_channels,
	.remove            = evtchn_2l_remove,
	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
	.clear_pending     = evtchn_2l_clear_pending,
	.set_pending       = evtchn_2l_set_pending,
	.is_pending        = evtchn_2l_is_pending,
	.mask              = evtchn_2l_mask,
	.unmask            = evtchn_2l_unmask,
	.handle_events     = evtchn_2l_handle_events,
	.resume            = evtchn_2l_resume,
	.percpu_deinit     = evtchn_2l_percpu_deinit,
};

void __init xen_evtchn_2l_init(void)
{
	pr_info("Using 2-level ABI\n");
	evtchn_ops = &evtchn_ops_2l;
}