cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

book3s_hv_builtin.c (16467B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
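
/*
 * The "kvm_cma_resv_ratio=" early parameter above overrides the 5%
 * default.  As a rough worked example: on a machine with 64 GiB of
 * memblock memory the default reserves about 64 GiB * 5 / 100 ~= 3.2 GiB
 * of CMA for guest hash page tables; booting with kvm_cma_resv_ratio=10
 * would roughly double that (the exact figure is page-aligned below).
 */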

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
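
/*
 * An illustrative (not verbatim) caller allocates and frees the HPT in
 * the same units, e.g. for an HPT of 1UL << order bytes:
 *
 *	struct page *page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
 *	...
 *	kvm_free_hpt_cma(page, 1ul << (order - PAGE_SHIFT));
 *
 * The VM_BUG_ON() above requires nr_pages to cover at least one
 * KVM_CMA_CHUNK_ORDER-sized (256 KiB) chunk.
 */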

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with cpus_read_lock/unlock().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	cpus_read_lock();
	atomic_inc(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	cpus_read_lock();
	atomic_dec(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

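/*
 * Returns true while any HV-mode guest exists.  Code that must be blocked
 * while HV guests exist (such as onlining of secondary threads, see the
 * comment above hv_vm_count) checks this under the CPU hotplug lock,
 * which serializes it against kvm_hv_vm_activated()/kvm_hv_vm_deactivated().
 */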
bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

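/*
 * Hypercall numbers are multiples of 4, and hcall_real_table (in
 * book3s_hv_rmhandlers.S) holds one entry per possible hcall number / 4;
 * a non-zero entry means the hcall has a real-mode handler.
 */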
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

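/*
 * Back the H_RANDOM hypercall with the PowerNV hardware RNG: report
 * whether one is present, and (in real mode) place a random number in
 * the guest's GPR4 on success.
 */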
int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

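/*
 * Record that this thread is exiting the guest and, if it is the first
 * thread of the vcore to do so, kick the other threads (and, with
 * dynamic micro-threading, the other subcores) out of the guest as well.
 */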
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
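/*
 * Look up the passthrough IRQ map entry whose real hardware IRQ number
 * matches the XISR value just read from the ICP, without taking any lock
 * (see the ordering argument below).
 */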
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if a passthrough interrupt needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

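/*
 * Read and triage one interrupt from the XICS ICP in real mode.  Sets
 * *again when an OPAL EOI reports that another interrupt is already
 * pending, so that the caller loops and reads that one too.
 */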
static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

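/*
 * Handle an interrupt taken unexpectedly while in the KVM guest
 * entry/exit code: pass system resets and machine checks to the usual
 * handlers, and treat anything else as fatal.
 */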
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * A system reset (0x100) can happen at any time; a machine check
	 * (0x200) can be caused by an invalid real address access, for
	 * example, or at any time due to a hardware problem.
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

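/*
 * Take the vcpu out of the ceded state and cancel the hrtimer that
 * would otherwise wake it when its decrementer expires.
 */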
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/* Guest must always run with ME enabled, HV disabled. */
	msr = (msr | MSR_ME) & ~MSR_HV;

	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

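/*
 * Deliver an interrupt to the guest the way the hardware would: save the
 * current PC and MSR into SRR0/SRR1, switch to the guest's interrupt MSR
 * and jump to the vector, relocated to the AIL base when LPCR[AIL]=3
 * applies.
 */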
static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	vcpu->arch.shregs.msr = new_msr;
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}

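/*
 * Flush the guest TLB on this CPU: issue one tlbiel per TLB congruence
 * class (kvm->arch.tlb_sets of them) and complete the sequence with a
 * ptesync.
 */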
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	for (set = 0; set < kvm->arch.tlb_sets; ++set) {
		/* R=0 PRS=0 RIC=0 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (0), "i" (0), "i" (0),
			       "r" (0) : "memory");
		rb += PPC_BIT(51);	/* increment set number */
	}
	asm volatile("ptesync": : :"memory");
}

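/*
 * Called on the way into the guest: if this physical CPU is marked in
 * kvm->arch.need_tlb_flush, flush the guest TLB here and clear the mark.
 */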
void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
{
	if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);