cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

e500.c (14529B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
      4 *
      5 * Author: Yu Liu, <yu.liu@freescale.com>
      6 *
      7 * Description:
      8 * This file is derived from arch/powerpc/kvm/44x.c,
      9 * by Hollis Blanchard <hollisb@us.ibm.com>.
     10 */
     11
     12#include <linux/kvm_host.h>
     13#include <linux/slab.h>
     14#include <linux/err.h>
     15#include <linux/export.h>
     16#include <linux/module.h>
     17#include <linux/miscdevice.h>
     18
     19#include <asm/reg.h>
     20#include <asm/cputable.h>
     21#include <asm/kvm_ppc.h>
     22
     23#include "../mm/mmu_decl.h"
     24#include "booke.h"
     25#include "e500.h"
     26
     27struct id {
     28	unsigned long val;
     29	struct id **pentry;
     30};
     31
     32#define NUM_TIDS 256
     33
     34/*
      35 * This table provides mappings from:
      36 * (guestAS,guestTID,guestPR) --> shadow ID on the physical cpu
     37 * guestAS	[0..1]
     38 * guestTID	[0..255]
     39 * guestPR	[0..1]
     40 * ID		[1..255]
     41 * Each vcpu keeps one vcpu_id_table.
     42 */
     43struct vcpu_id_table {
     44	struct id id[2][NUM_TIDS][2];
     45};
     46
     47/*
      48 * This table provides the reverse mapping of vcpu_id_table:
     49 * ID --> address of vcpu_id_table item.
     50 * Each physical core has one pcpu_id_table.
     51 */
     52struct pcpu_id_table {
     53	struct id *entry[NUM_TIDS];
     54};
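/*
 * Illustrative example (added annotation, not part of the original file):
 * suppose guest AS=0, TID=5, PR=1 is mapped to shadow ID 42 on this core.
 * The two tables are then expected to point at each other:
 *
 *	struct id *e = &vcpu_e500->idt->id[0][5][1];
 *	e->val == 42;
 *	e->pentry == this_cpu_ptr(&pcpu_sids.entry[42]);
 *	__this_cpu_read(pcpu_sids.entry[42]) == e;
 *
 * Breaking either direction (resetting the vcpu table, or wiping the
 * per-cpu table with local_sid_destroy_all()) invalidates the mapping,
 * which is exactly what local_sid_lookup() below checks for.
 */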
     55
     56static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
     57
      58/* This variable keeps the last used shadow ID on the local core.
      59 * The valid range of a shadow ID is [1..255]. */
     60static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
     61
     62/*
     63 * Allocate a free shadow id and setup a valid sid mapping in given entry.
      64 * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match.
     65 *
     66 * The caller must have preemption disabled, and keep it that way until
     67 * it has finished with the returned shadow id (either written into the
     68 * TLB or arch.shadow_pid, or discarded).
     69 */
     70static inline int local_sid_setup_one(struct id *entry)
     71{
     72	unsigned long sid;
     73	int ret = -1;
     74
     75	sid = __this_cpu_inc_return(pcpu_last_used_sid);
     76	if (sid < NUM_TIDS) {
     77		__this_cpu_write(pcpu_sids.entry[sid], entry);
     78		entry->val = sid;
     79		entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
     80		ret = sid;
     81	}
     82
     83	/*
     84	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
     85	 * the caller will invalidate everything and start over.
     86	 *
     87	 * sid > NUM_TIDS indicates a race, which we disable preemption to
     88	 * avoid.
     89	 */
     90	WARN_ON(sid > NUM_TIDS);
     91
     92	return ret;
     93}
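/*
 * Sketch of the allocation policy (added annotation): shadow IDs come from
 * a per-cpu counter, so with pcpu_last_used_sid at N the next call hands
 * out N+1.  Once the counter reaches NUM_TIDS the call returns -1 and the
 * caller is expected to flush the host TLB and reset all local mappings
 * before retrying, as kvmppc_e500_get_sid() below does:
 *
 *	sid = local_sid_setup_one(entry);
 *	if (sid <= 0) {
 *		_tlbil_all();
 *		local_sid_destroy_all();
 *	}
 */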
     94
     95/*
      96 * Check if the given entry contains a valid shadow id mapping.
     97 * An ID mapping is considered valid only if
     98 * both vcpu and pcpu know this mapping.
     99 *
    100 * The caller must have preemption disabled, and keep it that way until
    101 * it has finished with the returned shadow id (either written into the
    102 * TLB or arch.shadow_pid, or discarded).
    103 */
    104static inline int local_sid_lookup(struct id *entry)
    105{
    106	if (entry && entry->val != 0 &&
    107	    __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
    108	    entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
    109		return entry->val;
    110	return -1;
    111}
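/*
 * Note (added annotation): checking both entry->pentry and the per-cpu slot
 * guards against stale entries, e.g. a mapping set up while the vcpu ran on
 * another core, or one wiped out by local_sid_destroy_all() on this core.
 * Either case fails the comparison and is treated as "no mapping".
 */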
    112
    113/* Invalidate all id mappings on local core -- call with preempt disabled */
    114static inline void local_sid_destroy_all(void)
    115{
    116	__this_cpu_write(pcpu_last_used_sid, 0);
    117	memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
    118}
    119
    120static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
    121{
    122	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
    123	return vcpu_e500->idt;
    124}
    125
    126static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
    127{
    128	kfree(vcpu_e500->idt);
    129	vcpu_e500->idt = NULL;
    130}
    131
     132/* Map the guest PID to shadow PIDs.
     133 * We use PID to hold the shadow of the current non-zero guest PID,
     134 * and PID1 to hold the shadow of guest PID 0,
     135 * so that guest TLB entries with TID=0 can be matched at any time. */
    136static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
    137{
    138	preempt_disable();
    139	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
    140			get_cur_as(&vcpu_e500->vcpu),
    141			get_cur_pid(&vcpu_e500->vcpu),
    142			get_cur_pr(&vcpu_e500->vcpu), 1);
    143	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
    144			get_cur_as(&vcpu_e500->vcpu), 0,
    145			get_cur_pr(&vcpu_e500->vcpu), 1);
    146	preempt_enable();
    147}
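/*
 * Illustrative example (added annotation): with the guest running in AS=0,
 * PR=0 and guest PID 7, this roughly resolves to
 *
 *	shadow_pid  = sid for (AS=0, TID=7, PR=0)	-> host PID
 *	shadow_pid1 = sid for (AS=0, TID=0, PR=0)	-> host PID1
 *
 * so host TLB lookups match both the guest's current PID and the guest's
 * global (TID=0) entries.
 */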
    148
    149/* Invalidate all mappings on vcpu */
    150static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
    151{
    152	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
    153
    154	/* Update shadow pid when mappings are changed */
    155	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
    156}
    157
    158/* Invalidate one ID mapping on vcpu */
    159static inline void kvmppc_e500_id_table_reset_one(
    160			       struct kvmppc_vcpu_e500 *vcpu_e500,
    161			       int as, int pid, int pr)
    162{
    163	struct vcpu_id_table *idt = vcpu_e500->idt;
    164
    165	BUG_ON(as >= 2);
    166	BUG_ON(pid >= NUM_TIDS);
    167	BUG_ON(pr >= 2);
    168
    169	idt->id[as][pid][pr].val = 0;
    170	idt->id[as][pid][pr].pentry = NULL;
    171
    172	/* Update shadow pid when mappings are changed */
    173	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
    174}
    175
    176/*
    177 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
     178 * This function first looks up whether a valid mapping exists;
     179 * if not, it creates a new one.
    180 *
    181 * The caller must have preemption disabled, and keep it that way until
    182 * it has finished with the returned shadow id (either written into the
    183 * TLB or arch.shadow_pid, or discarded).
    184 */
    185unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
    186				 unsigned int as, unsigned int gid,
    187				 unsigned int pr, int avoid_recursion)
    188{
    189	struct vcpu_id_table *idt = vcpu_e500->idt;
    190	int sid;
    191
    192	BUG_ON(as >= 2);
    193	BUG_ON(gid >= NUM_TIDS);
    194	BUG_ON(pr >= 2);
    195
    196	sid = local_sid_lookup(&idt->id[as][gid][pr]);
    197
    198	while (sid <= 0) {
    199		/* No mapping yet */
    200		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
    201		if (sid <= 0) {
    202			_tlbil_all();
    203			local_sid_destroy_all();
    204		}
    205
    206		/* Update shadow pid when mappings are changed */
    207		if (!avoid_recursion)
    208			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
    209	}
    210
    211	return sid;
    212}
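/*
 * Usage sketch (added annotation): callers hold preemption off around the
 * lookup and the use of the result, e.g.
 *
 *	preempt_disable();
 *	sid = kvmppc_e500_get_sid(vcpu_e500, as, gid, pr, 0);
 *	... write sid into a shadow TLB entry or arch.shadow_pid ...
 *	preempt_enable();
 *
 * In the worst case the while loop above runs twice: once failing to
 * allocate (flushing the host TLB and all local mappings), and once
 * succeeding on the now-empty table.
 */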
    213
    214unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
    215				      struct kvm_book3e_206_tlb_entry *gtlbe)
    216{
    217	return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
    218				   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
    219}
    220
    221void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
    222{
    223	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    224
    225	if (vcpu->arch.pid != pid) {
    226		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
    227		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
    228	}
    229}
    230
    231/* gtlbe must not be mapped by more than one host tlbe */
    232void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
    233                           struct kvm_book3e_206_tlb_entry *gtlbe)
    234{
    235	struct vcpu_id_table *idt = vcpu_e500->idt;
    236	unsigned int pr, tid, ts;
    237	int pid;
    238	u32 val, eaddr;
    239	unsigned long flags;
    240
    241	ts = get_tlb_ts(gtlbe);
    242	tid = get_tlb_tid(gtlbe);
    243
    244	preempt_disable();
    245
    246	/* One guest ID may be mapped to two shadow IDs */
    247	for (pr = 0; pr < 2; pr++) {
    248		/*
    249		 * The shadow PID can have a valid mapping on at most one
    250		 * host CPU.  In the common case, it will be valid on this
    251		 * CPU, in which case we do a local invalidation of the
    252		 * specific address.
    253		 *
    254		 * If the shadow PID is not valid on the current host CPU,
    255		 * we invalidate the entire shadow PID.
    256		 */
    257		pid = local_sid_lookup(&idt->id[ts][tid][pr]);
    258		if (pid <= 0) {
    259			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
    260			continue;
    261		}
    262
    263		/*
    264		 * The guest is invalidating a 4K entry which is in a PID
    265		 * that has a valid shadow mapping on this host CPU.  We
     266 * search the host TLB to invalidate its shadow TLB entry,
    267		 * similar to __tlbil_va except that we need to look in AS1.
    268		 */
    269		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
    270		eaddr = get_tlb_eaddr(gtlbe);
    271
    272		local_irq_save(flags);
    273
    274		mtspr(SPRN_MAS6, val);
    275		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
    276		val = mfspr(SPRN_MAS1);
    277		if (val & MAS1_VALID) {
    278			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
    279			asm volatile("tlbwe");
    280		}
    281
    282		local_irq_restore(flags);
    283	}
    284
    285	preempt_enable();
    286}
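/*
 * Note (added annotation): the tlbsx search above is keyed on
 * MAS6[SPID] = shadow pid and MAS6[SAS] = 1, since guest translations are
 * installed in the host's AS1.  If the search hits, MAS1[VALID] is cleared
 * and written back with tlbwe, invalidating just that one host entry; if it
 * misses, there is nothing to invalidate for that pr value.
 */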
    287
    288void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
    289{
    290	kvmppc_e500_id_table_reset_all(vcpu_e500);
    291}
    292
    293void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
    294{
    295	/* Recalc shadow pid since MSR changes */
    296	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
    297}
    298
    299static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
    300{
    301	kvmppc_booke_vcpu_load(vcpu, cpu);
    302
     303	/* The shadow PID may have expired on the local core */
    304	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
    305}
    306
    307static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
    308{
    309#ifdef CONFIG_SPE
    310	if (vcpu->arch.shadow_msr & MSR_SPE)
    311		kvmppc_vcpu_disable_spe(vcpu);
    312#endif
    313
    314	kvmppc_booke_vcpu_put(vcpu);
    315}
    316
    317int kvmppc_core_check_processor_compat(void)
    318{
    319	int r;
    320
    321	if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
    322		r = 0;
    323	else
    324		r = -ENOTSUPP;
    325
    326	return r;
    327}
    328
    329static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
    330{
    331	struct kvm_book3e_206_tlb_entry *tlbe;
    332
    333	/* Insert large initial mapping for guest. */
    334	tlbe = get_entry(vcpu_e500, 1, 0);
    335	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
    336	tlbe->mas2 = 0;
    337	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
    338
    339	/* 4K map for serial output. Used by kernel wrapper. */
    340	tlbe = get_entry(vcpu_e500, 1, 1);
    341	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
    342	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
    343	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
    344}
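/*
 * Resulting initial guest TLB1 layout (added summary):
 *
 *	entry 0: 256M mapping at effective/real address 0 with supervisor
 *	         permissions, so the guest kernel can start executing.
 *	entry 1: 4K cache-inhibited, guarded mapping of 0xe0004000
 *	         (0xe0004500 masked to a 4K boundary), used by the kernel
 *	         wrapper for serial output.
 */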
    345
    346int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
    347{
    348	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    349
    350	kvmppc_e500_tlb_setup(vcpu_e500);
    351
    352	/* Registers init */
    353	vcpu->arch.pvr = mfspr(SPRN_PVR);
    354	vcpu_e500->svr = mfspr(SPRN_SVR);
    355
    356	vcpu->arch.cpu_type = KVM_CPU_E500V2;
    357
    358	return 0;
    359}
    360
    361static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
    362				      struct kvm_sregs *sregs)
    363{
    364	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    365
    366	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
    367	                       KVM_SREGS_E_PM;
    368	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;
    369
    370	sregs->u.e.impl.fsl.features = 0;
    371	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
    372	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
    373	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
    374
    375	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
    376	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
    377	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
    378	sregs->u.e.ivor_high[3] =
    379		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
    380
    381	kvmppc_get_sregs_ivor(vcpu, sregs);
    382	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
    383	return 0;
    384}
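/*
 * Note (added annotation): ivor_high[0..2] carry the SPE exception vectors
 * (unavailable, FP data, FP round) and ivor_high[3] the performance monitor
 * vector; the generic IVORs are filled in by kvmppc_get_sregs_ivor() and the
 * MMU state by kvmppc_get_sregs_e500_tlb().
 */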
    385
    386static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
    387				      struct kvm_sregs *sregs)
    388{
    389	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    390	int ret;
    391
    392	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
    393		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
    394		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
    395		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
    396	}
    397
    398	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
    399	if (ret < 0)
    400		return ret;
    401
    402	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
    403		return 0;
    404
    405	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
    406		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
    407			sregs->u.e.ivor_high[0];
    408		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
    409			sregs->u.e.ivor_high[1];
    410		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
    411			sregs->u.e.ivor_high[2];
    412	}
    413
    414	if (sregs->u.e.features & KVM_SREGS_E_PM) {
    415		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
    416			sregs->u.e.ivor_high[3];
    417	}
    418
    419	return kvmppc_set_sregs_ivor(vcpu, sregs);
    420}
    421
    422static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
    423				   union kvmppc_one_reg *val)
    424{
    425	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
    426	return r;
    427}
    428
    429static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
    430				   union kvmppc_one_reg *val)
    431{
     432	int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
    433	return r;
    434}
    435
    436static int kvmppc_core_vcpu_create_e500(struct kvm_vcpu *vcpu)
    437{
    438	struct kvmppc_vcpu_e500 *vcpu_e500;
    439	int err;
    440
    441	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
    442	vcpu_e500 = to_e500(vcpu);
    443
    444	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
    445		return -ENOMEM;
    446
    447	err = kvmppc_e500_tlb_init(vcpu_e500);
    448	if (err)
    449		goto uninit_id;
    450
    451	vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
    452	if (!vcpu->arch.shared) {
    453		err = -ENOMEM;
    454		goto uninit_tlb;
    455	}
    456
    457	return 0;
    458
    459uninit_tlb:
    460	kvmppc_e500_tlb_uninit(vcpu_e500);
    461uninit_id:
    462	kvmppc_e500_id_table_free(vcpu_e500);
    463	return err;
    464}
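/*
 * Note (added annotation): the error unwind runs in reverse setup order -- a
 * failed shared-page allocation tears down the TLB state first, then the ID
 * table.  kvmppc_core_vcpu_free_e500() below releases the shared page, TLB
 * state and ID table of a fully constructed vcpu in the same reverse order.
 */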
    465
    466static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
    467{
    468	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    469
    470	free_page((unsigned long)vcpu->arch.shared);
    471	kvmppc_e500_tlb_uninit(vcpu_e500);
    472	kvmppc_e500_id_table_free(vcpu_e500);
    473}
    474
    475static int kvmppc_core_init_vm_e500(struct kvm *kvm)
    476{
    477	return 0;
    478}
    479
    480static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
    481{
    482}
    483
    484static struct kvmppc_ops kvm_ops_e500 = {
    485	.get_sregs = kvmppc_core_get_sregs_e500,
    486	.set_sregs = kvmppc_core_set_sregs_e500,
    487	.get_one_reg = kvmppc_get_one_reg_e500,
    488	.set_one_reg = kvmppc_set_one_reg_e500,
    489	.vcpu_load   = kvmppc_core_vcpu_load_e500,
    490	.vcpu_put    = kvmppc_core_vcpu_put_e500,
    491	.vcpu_create = kvmppc_core_vcpu_create_e500,
    492	.vcpu_free   = kvmppc_core_vcpu_free_e500,
    493	.init_vm = kvmppc_core_init_vm_e500,
    494	.destroy_vm = kvmppc_core_destroy_vm_e500,
    495	.emulate_op = kvmppc_core_emulate_op_e500,
    496	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
    497	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
    498	.create_vcpu_debugfs = kvmppc_create_vcpu_debugfs_e500,
    499};
    500
    501static int __init kvmppc_e500_init(void)
    502{
    503	int r, i;
    504	unsigned long ivor[3];
    505	/* Process remaining handlers above the generic first 16 */
    506	unsigned long *handler = &kvmppc_booke_handler_addr[16];
    507	unsigned long handler_len;
    508	unsigned long max_ivor = 0;
    509
    510	r = kvmppc_core_check_processor_compat();
    511	if (r)
    512		goto err_out;
    513
    514	r = kvmppc_booke_init();
    515	if (r)
    516		goto err_out;
    517
    518	/* copy extra E500 exception handlers */
    519	ivor[0] = mfspr(SPRN_IVOR32);
    520	ivor[1] = mfspr(SPRN_IVOR33);
    521	ivor[2] = mfspr(SPRN_IVOR34);
    522	for (i = 0; i < 3; i++) {
    523		if (ivor[i] > ivor[max_ivor])
    524			max_ivor = i;
    525
    526		handler_len = handler[i + 1] - handler[i];
    527		memcpy((void *)kvmppc_booke_handlers + ivor[i],
    528		       (void *)handler[i], handler_len);
    529	}
    530	handler_len = handler[max_ivor + 1] - handler[max_ivor];
    531	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
    532			   ivor[max_ivor] + handler_len);
    533
    534	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
    535	if (r)
    536		goto err_out;
    537	kvm_ops_e500.owner = THIS_MODULE;
    538	kvmppc_pr_ops = &kvm_ops_e500;
    539
    540err_out:
    541	return r;
    542}
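/*
 * Note (added annotation): IVOR32..IVOR34 are the e500v2 SPE/embedded FP
 * exception vectors (unavailable, FP data, FP round); their handlers sit
 * past the first 16 generic BookE handlers, hence the
 * &kvmppc_booke_handler_addr[16] base used above.
 */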
    543
    544static void __exit kvmppc_e500_exit(void)
    545{
    546	kvmppc_pr_ops = NULL;
    547	kvmppc_booke_exit();
    548}
    549
    550module_init(kvmppc_e500_init);
    551module_exit(kvmppc_e500_exit);
    552MODULE_ALIAS_MISCDEV(KVM_MINOR);
    553MODULE_ALIAS("devname:kvm");