cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

entry.c (25649B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_PWBASE	5, 5
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_BADINSTR	8, 1
#define C0_BADINSTRP	8, 2
#define C0_PGD		9, 7
#define C0_ENTRYHI	10, 0
#define C0_GUESTCTL1	10, 4
#define C0_STATUS	12, 0
#define C0_GUESTCTL0	12, 6
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ   32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif

static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 * we assume symmetry.
 */
static int c0_kscratch(void)
{
	return 31;
}

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

	if (pgd_reg != -1)
		kscratch_mask &= ~BIT(pgd_reg);

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = c0_kscratch();
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = c0_kscratch();
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}
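
/*
 * Worked illustration (not taken from this file, and assuming the CPU probe
 * sets bit n of kscratch_mask for KScratch select n): if Config4.KScrExist
 * reports selects 2-5 so that cpu_data[0].kscratch_mask == 0x3c, and the TLB
 * refill code has already claimed pgd_reg == 2, the code above masks out
 * bit 2 and picks scratch_vcpu = { 31, 3 } and scratch_tmp = { 31, 4 },
 * i.e. CP0 register 31 selects 3 and 4. If no KScratch registers are
 * implemented, the C0_DDATA_LO/C0_ERROREPC defaults above stay in effect.
 */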

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}
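
/*
 * Sketch of how a caller might lay out the pieces generated by this file
 * (the real layout lives in the VCPU creation path, arch/mips/kvm/mips.c in
 * mainline; gebase, handler and the offsets below are illustrative
 * assumptions, not taken from this file):
 *
 *	handler = gebase + 0x2000;			// common exit handler area
 *	kvm_mips_build_tlb_refill_exception(gebase, handler);
 *	kvm_mips_build_exception(gebase + 0x180, handler);
 *	p = kvm_mips_build_exit(handler);
 *	vcpu->arch.vcpu_run = p;			// entry point used by the run loop
 *	p = kvm_mips_build_vcpu_run(p);
 *	flush_icache_range((unsigned long)gebase, (unsigned long)p);
 *
 * i.e. both exception vectors branch into the single exit handler built by
 * kvm_mips_build_exit(), which falls through into the return paths below,
 * and the generated vcpu_run() is then invoked like an ordinary C function.
 */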

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label __maybe_unused *l = labels;
	struct uasm_reloc __maybe_unused *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
	if (cpu_has_ldpte)
		UASM_i_MFC0(&p, K0, C0_PWBASE);
	else
		UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);

	/*
	 * Set up KVM GPA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 *
	 * We keep S0 pointing at struct kvm so we can load the ASID below.
	 */
	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
			  (int)offsetof(struct kvm_vcpu, arch), K1);
	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Set GM bit to setup eret to VZ guest context */
	uasm_i_addiu(&p, V1, ZERO, 1);
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	if (cpu_has_guestid) {
		/*
		 * Set root mode GuestID, so that root TLB refill handler can
		 * use the correct GuestID in the root TLB.
		 */

		/* Get current GuestID */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = GuestCtl1.ID */
		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
			   MIPS_GCTL1_ID_WIDTH);
		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);

		/* GuestID handles dealiasing so we don't need to touch ASID */
		goto skip_asid_restore;
	}

	/* Root ASID Dealias (RAD) */

	/* Save host ASID */
	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
		  K1);

	/* Set the root ASID for the Guest */
	UASM_i_ADDIU(&p, T1, S0,
		     offsetof(struct kvm, arch.gpa_mm.context.asid));

	/* t1: contains the base of the ASID array, need to get the cpu id  */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* index the ASID array */
	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
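
	/*
	 * In C terms the sequence above computes, roughly,
	 * kvm->arch.gpa_mm.context.asid[cpu] & cpu_asid_mask(&cpu_data[cpu]).
	 * This is an explanatory paraphrase only; the generated code reads the
	 * per-CPU ASID and mask directly rather than calling any helper.
	 */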
    386
    387	/* Set up KVM VZ root ASID (!guestid) */
    388	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
    389skip_asid_restore:
    390	uasm_i_ehb(&p);
    391
    392	/* Disable RDHWR access */
    393	uasm_i_mtc0(&p, ZERO, C0_HWRENA);
    394
    395	/* load the guest context from VCPU and return */
    396	for (i = 1; i < 32; ++i) {
    397		/* Guest k0/k1 loaded later */
    398		if (i == K0 || i == K1)
    399			continue;
    400		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
    401	}
    402
    403#ifndef CONFIG_CPU_MIPSR6
    404	/* Restore hi/lo */
    405	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
    406	uasm_i_mthi(&p, K0);
    407
    408	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
    409	uasm_i_mtlo(&p, K0);
    410#endif
    411
    412	/* Restore the guest's k0/k1 registers */
    413	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
    414	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
    415
    416	/* Jump to guest */
    417	uasm_i_eret(&p);
    418
    419	uasm_resolve_relocs(relocs, labels);
    420
    421	return p;
    422}
    423
    424/**
    425 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
    426 * @addr:	Address to start writing code.
    427 * @handler:	Address of common handler (within range of @addr).
    428 *
    429 * Assemble TLB refill exception fast path handler for guest execution.
    430 *
    431 * Returns:	Next address after end of written function.
    432 */
    433void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
    434{
    435	u32 *p = addr;
    436	struct uasm_label labels[2];
    437	struct uasm_reloc relocs[2];
    438#ifndef CONFIG_CPU_LOONGSON64
    439	struct uasm_label *l = labels;
    440	struct uasm_reloc *r = relocs;
    441#endif
    442
    443	memset(labels, 0, sizeof(labels));
    444	memset(relocs, 0, sizeof(relocs));
    445
    446	/* Save guest k1 into scratch register */
    447	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
    448
    449	/* Get the VCPU pointer from the VCPU scratch register */
    450	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
    451
    452	/* Save guest k0 into VCPU structure */
    453	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
    454
    455	/*
    456	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
    457	 * assume symmetry and just disable preemption to silence the warning.
    458	 */
    459	preempt_disable();
    460
    461#ifdef CONFIG_CPU_LOONGSON64
    462	UASM_i_MFC0(&p, K1, C0_PGD);
    463	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
    464#ifndef __PAGETABLE_PMD_FOLDED
    465	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
    466#endif
    467	uasm_i_ldpte(&p, K1, 0);      /* even */
    468	uasm_i_ldpte(&p, K1, 1);      /* odd */
    469	uasm_i_tlbwr(&p);
    470#else
    471	/*
    472	 * Now for the actual refill bit. A lot of this can be common with the
    473	 * Linux TLB refill handler, however we don't need to handle so many
    474	 * cases. We only need to handle user mode refills, and user mode runs
    475	 * with 32-bit addressing.
    476	 *
    477	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
    478	 * that isn't resolved should never actually get taken and is harmless
    479	 * to leave in place for now.
    480	 */
    481
    482#ifdef CONFIG_64BIT
    483	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
    484#else
    485	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
    486#endif
    487
    488	/* we don't support huge pages yet */
    489
    490	build_get_ptep(&p, K0, K1);
    491	build_update_entries(&p, K0, K1);
    492	build_tlb_write_entry(&p, &l, &r, tlb_random);
    493#endif
    494
    495	preempt_enable();
    496
    497	/* Get the VCPU pointer from the VCPU scratch register again */
    498	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
    499
    500	/* Restore the guest's k0/k1 registers */
    501	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
    502	uasm_i_ehb(&p);
    503	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
    504
    505	/* Jump to guest */
    506	uasm_i_eret(&p);
    507
    508	return p;
    509}
    510
    511/**
    512 * kvm_mips_build_exception() - Assemble first level guest exception handler.
    513 * @addr:	Address to start writing code.
    514 * @handler:	Address of common handler (within range of @addr).
    515 *
    516 * Assemble exception vector code for guest execution. The generated vector will
    517 * branch to the common exception handler generated by kvm_mips_build_exit().
    518 *
    519 * Returns:	Next address after end of written function.
    520 */
    521void *kvm_mips_build_exception(void *addr, void *handler)
    522{
    523	u32 *p = addr;
    524	struct uasm_label labels[2];
    525	struct uasm_reloc relocs[2];
    526	struct uasm_label *l = labels;
    527	struct uasm_reloc *r = relocs;
    528
    529	memset(labels, 0, sizeof(labels));
    530	memset(relocs, 0, sizeof(relocs));
    531
    532	/* Save guest k1 into scratch register */
    533	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
    534
    535	/* Get the VCPU pointer from the VCPU scratch register */
    536	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
    537	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
    538
    539	/* Save guest k0 into VCPU structure */
    540	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
    541
    542	/* Branch to the common handler */
    543	uasm_il_b(&p, &r, label_exit_common);
    544	 uasm_i_nop(&p);
    545
    546	uasm_l_exit_common(&l, handler);
    547	uasm_resolve_relocs(relocs, labels);
    548
    549	return p;
    550}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	if (cpu_has_badinstr) {
		uasm_i_mfc0(&p, K0, C0_BADINSTR);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstr), K1);
	}

	if (cpu_has_badinstrp) {
		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstrp), K1);
	}

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		 uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		 uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Restore host ASID */
	if (!cpu_has_guestid) {
		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
			  K1);
		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
	}

	/*
	 * Set up normal Linux process pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 */
	UASM_i_LW(&p, A0,
		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
	uasm_i_sw(&p, K0,
		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);

	if (cpu_has_guestid) {
		/*
		 * Clear root mode GuestID, so that root TLB operations use the
		 * root GuestID in the root TLB.
		 */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	uasm_i_move(&p, A0, S0);
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S0);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
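
	/*
	 * This relies on RESUME_HOST being a single flag constant that fits
	 * an andi immediate (bit 1 in asm/kvm_host.h at the time of writing),
	 * with RESUME_GUEST being 0, so one andi/bnez pair is enough to tell
	 * the two cases apart.
	 */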
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	 uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s0) back into the scratch register */
	UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	 uasm_i_nop(&p);

	return p;
}