cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmio-mod.c (11415B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);		/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */
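
/*
 * A minimal sketch of the pattern the rules above describe, assuming the
 * is_enabled() helper and trace_lock defined in this file: every path that
 * emits trace events re-checks the enable state under trace_lock, exactly
 * as ioremap_trace_core() and mmiotrace_printk() below do:
 *
 *	spin_lock_irq(&trace_lock);
 *	if (is_enabled())
 *		mmio_trace_mapping(&map);
 *	spin_unlock_irq(&trace_lock);
 *
 * Because mmiotrace_enabled only changes while trace_lock is held, the
 * check cannot go stale inside the critical section.
 */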

/* module parameters */
static unsigned long	filter_offset;
static bool		nommiotrace;
static bool		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");
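
/*
 * Typical use, as a hedged sketch: mmiotrace is normally built in, so the
 * parameters above are given on the kernel command line (the 0xfebd0000
 * value is only an example), and tracing is driven through the ftrace
 * interface, assuming a standard debugfs mount:
 *
 *	mmiotrace.filter_offset=0xfebd0000 mmiotrace.trace_pc=1
 *	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt
 */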

static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
	pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}
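
/*
 * How the physical address in pre() is derived, with hypothetical numbers:
 * if a BAR at phys 0xfebd0000 was ioremapped to probe.addr 0xffffc90000100000
 * and the faulting access hits 0xffffc90000100040, then
 *
 *	my_trace->phys = 0xffffc90000100040 - 0xffffc90000100000 + 0xfebd0000
 *	               = 0xfebd0040
 *
 * i.e. the offset into the virtual mapping is re-applied to the physical
 * base recorded when the mapping was traced.
 */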

static void post(struct kmmio_probe *p, unsigned long condition,
							struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler\n");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

static void ioremap_trace_core(resource_size_t offset, unsigned long size,
							void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if ((filter_offset) && (offset != filter_offset))
		return;
	ioremap_trace_core(offset, size, addr);
}
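
/*
 * Call-path sketch: drivers do not call mmiotrace_ioremap() themselves.
 * With CONFIG_MMIOTRACE, the x86 ioremap implementation invokes it for
 * each newly established mapping, roughly (simplified):
 *
 *	void __iomem *ret = ...;	(mapping just set up by ioremap)
 *	mmiotrace_ioremap(phys_addr, size, ret);
 *
 * so a kmmio probe covering the whole mapping is armed transparently
 * whenever tracing is enabled.
 */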

static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;
	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);
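
/*
 * Usage example (hypothetical driver code): mmiotrace_printk() lets
 * in-kernel code drop markers into the MMIO trace stream; the marker only
 * reaches the log while tracing is enabled:
 *
 *	mmiotrace_printk("resetting device, attempt %d\n", attempt);
 */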

static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	cpus_read_lock();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	cpus_read_unlock();

	for_each_cpu(cpu, downed_cpus) {
		err = remove_cpu(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs still online, may miss events.\n");
}

static void leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = add_cpu(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs are online, may miss events. "
			"Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
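
/*
 * Lifecycle note: enable_mmiotrace() and disable_mmiotrace() are not called
 * directly by users; in mainline they are invoked by the ftrace mmiotrace
 * plugin (kernel/trace/trace_mmiotrace.c) when the tracer is selected or
 * torn down, e.g.:
 *
 *	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer	(enable)
 *	echo nop > /sys/kernel/debug/tracing/current_tracer		(disable)
 */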