cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kvm_book3s_64.h (18371B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
	int l1_lpid;			/* lpid L1 guest thinks this guest is */
	int shadow_lpid;		/* real lpid of this nested guest */
	pgd_t *shadow_pgtable;		/* our page table for this guest */
	u64 l1_gr_to_hr;		/* L1's addr of part'n-scoped table */
	u64 process_table;		/* process table entry for this guest */
	long refcnt;			/* number of pointers to this struct */
	struct mutex tlb_lock;		/* serialize page faults and tlbies */
	struct kvm_nested_guest *next;
	cpumask_t need_tlb_flush;
	short prev_cpu[NR_CPUS];
	u8 radix;			/* is this nested guest radix */
};

/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000	12-bit lpid field
 * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
 * 0x0000000000000001	1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT		(52)
#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL
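
/*
 * Editor's sketch (not part of the original header): how a nested rmap
 * entry is packed from an L1 lpid and a guest physical address using the
 * masks above. The helper name is hypothetical.
 */
static inline u64 example_make_nested_rmap(unsigned int lpid, unsigned long gpa)
{
	return (((u64)lpid << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK) |
	       (gpa & RMAP_NESTED_GPA_MASK);
}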

/* Structure for a nested guest rmap entry */
struct rmap_nested {
	struct llist_node list;
	u64 rmap;
};

/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *			     safe against removal of the list entry or NULL list
 * @pos:	a (struct rmap_nested *) to use as a loop cursor
 * @node:	pointer to the first entry
 *		NOTE: this can be NULL
 * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
 *		iteration
 *		NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero.  This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * unsigned long rmap;
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *	do_something(rmap);
 *	free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)			       \
	for ((pos) = llist_entry((node), typeof(*(pos)), list);		       \
	     (node) &&							       \
	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
			  ((u64) (node)) : ((pos)->rmap))) &&		       \
	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?      \
			 ((struct llist_node *) ((pos) = NULL)) :	       \
			 (pos)->list.next)), true);			       \
	     (pos) = llist_entry((node), typeof(*(pos)), list))
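
/*
 * Editor's sketch (not part of the original header): walking a nested rmap
 * list with for_each_nest_rmap_safe() and decoding the lpid field of each
 * entry. The helper name is hypothetical; as the comment above notes, it
 * would also be safe to free (pos) inside the loop.
 */
static inline unsigned long example_count_nest_rmaps(struct llist_node *node,
						     unsigned int lpid)
{
	struct rmap_nested *cursor;
	unsigned long rmap, count = 0;

	for_each_nest_rmap_safe(cursor, node, &rmap) {
		if (((rmap & RMAP_NESTED_LPID_MASK) >>
		     RMAP_NESTED_LPID_SHIFT) == lpid)
			count++;
	}
	return count;
}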

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
					 ___PPC_R(r))

/* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46
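
/*
 * Editor's note (worked example): the order is log2 of the HPT size in
 * bytes, so PPC_MIN_HPT_ORDER (18) corresponds to 1UL << 18 = 256kiB and
 * PPC_MAX_HPT_ORDER (46) to 1UL << 46 = 64TiB.
 */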

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
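
/*
 * Editor's sketch (not part of the original header): svcpu_get() disables
 * preemption so the per-CPU shadow vcpu stays stable until the matching
 * svcpu_put(). The helper name and the gpr[] field access are illustrative
 * assumptions about the shadow vcpu layout.
 */
static inline unsigned long example_read_shadow_gpr3(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	unsigned long val = svcpu->gpr[3];

	svcpu_put(svcpu);
	return val;
}
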
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
{
	bool radix;

	if (vcpu->arch.nested)
		radix = vcpu->arch.nested->radix;
	else
		radix = kvm_is_radix(vcpu->kvm);

	return radix;
}

unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);

int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * Invalid HDSISR value which is used to indicate when HW has not set the reg.
 * Used to work around an erratum.
 */
#define HDSISR_CANARY	0x7fff

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte-swap all data we apply to the PTE, we are implicitly
	 * correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "  stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
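
/*
 * Editor's sketch (not part of the original header): the usual pattern for
 * taking the per-HPTE lock is to spin on try_lock_hpte() and to release it
 * with unlock_hpte(), which also publishes the (possibly modified) first
 * doubleword. The helper name is hypothetical.
 */
static inline unsigned long example_read_hpte_v_locked(__be64 *hptep)
{
	unsigned long v;

	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	/* ... inspect or modify the HPTE under the lock here ... */
	unlock_hpte(hptep, v);
	return v;
}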

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;	/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
		break;
	case 1:
		return 16;			/* 64kB */
		break;
	case 3:
		return !lphi ? 34 : 0;		/* 16GB */
		break;
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
		break;
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}
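
/*
 * Editor's note (worked example): a 16MB page mapped with a 64kB base page
 * size decodes as kvmppc_hpte_page_shifts() == (24 << 8) + 16, so the base
 * page shift is 16, the actual page shift is 24 and kvmppc_actual_pgsz()
 * returns 1ul << 24 = 16MB.
 */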

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
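
/*
 * Editor's note (worked example): kvmppc_pgsize_lp_encoding(12, 24) == 0x38
 * encodes a 16MB page in a 4kB base page segment, while an unsupported
 * combination such as (16, 12) returns -1.
 */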

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of va.
	 * v has its top two bits covering segment size, hence move
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has the lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 (so we need to
	 * collect an extra 11 bits); for others we need 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has the lower 23 bits cleared; we need to derive
	 * them from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of hashing.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits. To find the vsid we need to
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/*  AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields;
		 * these also contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on the
		 * actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field: 58..77 - base_page_shift bits of va.
		 * We have space for bits 58..64; missing bits should be
		 * zero filled. The +1 takes care of the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	/*
	 * This sets both bits of the B field in the PTE. 0b1x values are
	 * reserved, but those will have been filtered by kvmppc_do_h_enter.
	 */
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure the hptel is
	 * also cache-inhibited.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the pte is not present, return an empty pte */
		if (unlikely(!pte_present(old_pte)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
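
/*
 * Editor's sketch (not part of the original header): rmap entries are
 * updated under the per-entry lock bit taken by lock_rmap() and released
 * by unlock_rmap(). The helper name and the flag parameter are
 * illustrative.
 */
static inline void example_set_rmap_flag(unsigned long *rmap, unsigned long flag)
{
	lock_rmap(rmap);
	*rmap |= flag;		/* a caller-chosen KVMPPC_RMAP_* bit */
	unlock_rmap(rmap);
}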

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
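
/*
 * Editor's note (worked example, assuming 64kB host pages): for a 16MB
 * guest page size the mask is (0x1000000 >> 16) - 1 = 0xff, so both
 * memslot->base_gfn and memslot->npages must be multiples of 256 host
 * pages, i.e. the slot must be 16MB aligned.
 */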

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
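
/*
 * Editor's note (worked example): slb_pgsize_encoding(0x10000) returns
 * SLB_VSID_L | SLB_VSID_LP_01 for 64kB pages, while 4kB pages (0x1000)
 * encode as 0.
 */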

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_check(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
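
/*
 * Editor's note (worked example): with the default HPT order of 24 (a 16MB
 * HPT), kvmppc_hpt_npte() is 1UL << 20 HPTEs and kvmppc_hpt_mask() is
 * (1UL << 17) - 1, i.e. 128k HPTE groups of 8 entries (128 bytes) each.
 */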

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{

	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
	msr &= ~MSR_HV;
	msr |= MSR_ME;
	return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.regs.link  = vcpu->arch.lr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp  = vcpu->arch.fp_tm;
	vcpu->arch.vr  = vcpu->arch.vr_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.lr_tm  = vcpu->arch.regs.link;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp_tm  = vcpu->arch.fp;
	vcpu->arch.vr_tm  = vcpu->arch.vr;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
			     unsigned long gpa, unsigned int level,
			     unsigned long mmu_seq, unsigned int lpid,
			     unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
				   struct rmap_nested **n_rmap);
extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
					   unsigned long clr, unsigned long set,
					   unsigned long hpa, unsigned long nbytes);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				const struct kvm_memory_slot *memslot,
				unsigned long gpa, unsigned long hpa,
				unsigned long nbytes);

static inline pte_t *
find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
				unsigned *hshift)
{
	pte_t *pte;

	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
	return pte;
}

static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
					    unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held \n", __func__);
	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);

	return pte;
}

static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
				       unsigned long ea, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held \n", __func__);

	if (mmu_notifier_retry(kvm, mmu_seq))
		return NULL;

	pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);

	return pte;
}

extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
					unsigned long ea, unsigned *hshift);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */