cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

emulate.c (6901B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

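/*
 * Program the vcpu's decrementer hrtimer from the guest DEC value: cancel
 * any pending timer, convert the DEC count from timebase ticks to
 * seconds/nanoseconds, arm a relative hrtimer for the expiry, and record
 * the timebase at which the value was programmed.
 */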
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %lx\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host timebase.
	 * So use the host timebase calculations for decrementer emulation.
	 */
	dec_time = tb_to_ns(dec_time);
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	vcpu->arch.dec_jiffies = get_tb();
}

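/*
 * Return the current emulated DEC value: the value last written, minus the
 * timebase ticks elapsed since it was programmed (saturating at zero on
 * BookE rather than going negative).
 */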
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
	u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
	if (vcpu->arch.dec < jd)
		return 0;
#endif

	return vcpu->arch.dec - jd;
}

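/*
 * Emulate mtspr: handle the SPRs common to all cores here and defer
 * anything else to the core-specific emulate_mtspr callback.
 */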
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_SRR0:
		kvmppc_set_srr0(vcpu, spr_val);
		break;
	case SPRN_SRR1:
		kvmppc_set_srr1(vcpu, spr_val);
		break;

	/* XXX We need to context-switch the timebase for
	 * watchdog and FIT. */
	case SPRN_TBWL: break;
	case SPRN_TBWU: break;

	case SPRN_DEC:
		vcpu->arch.dec = (u32) spr_val;
		kvmppc_emulate_dec(vcpu);
		break;

	case SPRN_SPRG0:
		kvmppc_set_sprg0(vcpu, spr_val);
		break;
	case SPRN_SPRG1:
		kvmppc_set_sprg1(vcpu, spr_val);
		break;
	case SPRN_SPRG2:
		kvmppc_set_sprg2(vcpu, spr_val);
		break;
	case SPRN_SPRG3:
		kvmppc_set_sprg3(vcpu, spr_val);
		break;

	/* PIR can legally be written, but we ignore it */
	case SPRN_PIR: break;

	default:
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
								  spr_val);
		if (emulated == EMULATE_FAIL)
			printk(KERN_INFO "mtspr: unknown spr "
				"0x%x\n", sprn);
		break;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

	return emulated;
}

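/*
 * Emulate mfspr: read the SPRs common to all cores here and defer unknown
 * SPRs to the core-specific emulate_mfspr callback; the result is written
 * to the destination GPR only if emulation succeeded.
 */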
static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = 0;

	switch (sprn) {
	case SPRN_SRR0:
		spr_val = kvmppc_get_srr0(vcpu);
		break;
	case SPRN_SRR1:
		spr_val = kvmppc_get_srr1(vcpu);
		break;
	case SPRN_PVR:
		spr_val = vcpu->arch.pvr;
		break;
	case SPRN_PIR:
		spr_val = vcpu->vcpu_id;
		break;

	/* Note: mftb and TBRL/TBWL are user-accessible, so
	 * the guest can always access the real TB anyways.
	 * In fact, we probably will never see these traps. */
	case SPRN_TBWL:
		spr_val = get_tb() >> 32;
		break;
	case SPRN_TBWU:
		spr_val = get_tb();
		break;

	case SPRN_SPRG0:
		spr_val = kvmppc_get_sprg0(vcpu);
		break;
	case SPRN_SPRG1:
		spr_val = kvmppc_get_sprg1(vcpu);
		break;
	case SPRN_SPRG2:
		spr_val = kvmppc_get_sprg2(vcpu);
		break;
	case SPRN_SPRG3:
		spr_val = kvmppc_get_sprg3(vcpu);
		break;
	/* Note: SPRG4-7 are user-readable, so we don't get
	 * a trap. */

	case SPRN_DEC:
		spr_val = kvmppc_get_dec(vcpu, get_tb());
		break;
	default:
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
								  &spr_val);
		if (unlikely(emulated == EMULATE_FAIL)) {
			printk(KERN_INFO "mfspr: unknown spr "
				"0x%x\n", sprn);
		}
		break;
	}

	if (emulated == EMULATE_DONE)
		kvmppc_set_gpr(vcpu, rt, spr_val);
	kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

	return emulated;
}

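/*
 * Top-level trap-and-emulate entry point: fetch and decode the last guest
 * instruction, handle the generic cases (traps, mfspr/mtspr, tlbsync, the
 * software breakpoint), and fall back to the core-specific emulate_op
 * callback for anything else.
 */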
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
{
	u32 inst;
	int rs, rt, sprn;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	rs = get_rs(inst);
	rt = get_rt(inst);
	sprn = get_sprn(inst);

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case 0:
		/*
		 * Instruction with primary opcode 0. Based on PowerISA
		 * these are illegal instructions.
		 */
		if (inst == KVMPPC_INST_SW_BREAKPOINT) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			vcpu->run->debug.arch.status = 0;
			vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
			emulated = EMULATE_EXIT_USER;
			advance = 0;
		} else
			emulated = EMULATE_FAIL;

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);