cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

powerpc.c (59002B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *
      4 * Copyright IBM Corp. 2007
      5 *
      6 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
      7 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
      8 */
      9
     10#include <linux/errno.h>
     11#include <linux/err.h>
     12#include <linux/kvm_host.h>
     13#include <linux/vmalloc.h>
     14#include <linux/hrtimer.h>
     15#include <linux/sched/signal.h>
     16#include <linux/fs.h>
     17#include <linux/slab.h>
     18#include <linux/file.h>
     19#include <linux/module.h>
     20#include <linux/irqbypass.h>
     21#include <linux/kvm_irqfd.h>
     22#include <linux/of.h>
     23#include <asm/cputable.h>
     24#include <linux/uaccess.h>
     25#include <asm/kvm_ppc.h>
     26#include <asm/cputhreads.h>
     27#include <asm/irqflags.h>
     28#include <asm/iommu.h>
     29#include <asm/switch_to.h>
     30#include <asm/xive.h>
     31#ifdef CONFIG_PPC_PSERIES
     32#include <asm/hvcall.h>
     33#include <asm/plpar_wrappers.h>
     34#endif
     35#include <asm/ultravisor.h>
     36
     37#include "timing.h"
     38#include "irq.h"
     39#include "../mm/mmu_decl.h"
     40
     41#define CREATE_TRACE_POINTS
     42#include "trace.h"
     43
     44struct kvmppc_ops *kvmppc_hv_ops;
     45EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
     46struct kvmppc_ops *kvmppc_pr_ops;
     47EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
     48
     49
     50int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
     51{
     52	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
     53}
     54
     55bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
     56{
     57	return kvm_arch_vcpu_runnable(vcpu);
     58}
     59
     60bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
     61{
     62	return false;
     63}
     64
     65int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
     66{
     67	return 1;
     68}
     69
     70/*
     71 * Common checks before entering the guest world.  Call with interrupts
     72 * enabled; they are hard-disabled here before entering the guest.
     73 *
     74 * returns:
     75 *
     76 * == 1 if we're ready to go into guest state
     77 * <= 0 if we need to go back to the host with return value
     78 */
     79int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
     80{
     81	int r;
     82
     83	WARN_ON(irqs_disabled());
     84	hard_irq_disable();
     85
     86	while (true) {
     87		if (need_resched()) {
     88			local_irq_enable();
     89			cond_resched();
     90			hard_irq_disable();
     91			continue;
     92		}
     93
     94		if (signal_pending(current)) {
     95			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
     96			vcpu->run->exit_reason = KVM_EXIT_INTR;
     97			r = -EINTR;
     98			break;
     99		}
    100
    101		vcpu->mode = IN_GUEST_MODE;
    102
    103		/*
    104		 * Reading vcpu->requests must happen after setting vcpu->mode,
    105		 * so we don't miss a request because the requester sees
    106		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
    107		 * before next entering the guest (and thus doesn't IPI).
    108		 * This also orders the write to mode from any reads
    109		 * to the page tables done while the VCPU is running.
    110		 * Please see the comment in kvm_flush_remote_tlbs.
    111		 */
    112		smp_mb();
    113
    114		if (kvm_request_pending(vcpu)) {
    115		/* Make sure we process requests preemptibly */
    116			local_irq_enable();
    117			trace_kvm_check_requests(vcpu);
    118			r = kvmppc_core_check_requests(vcpu);
    119			hard_irq_disable();
    120			if (r > 0)
    121				continue;
    122			break;
    123		}
    124
    125		if (kvmppc_core_prepare_to_enter(vcpu)) {
    126			/* interrupts got enabled in between, so we
    127			   are back at square 1 */
    128			continue;
    129		}
    130
    131		guest_enter_irqoff();
    132		return 1;
    133	}
    134
    135	/* return to host */
    136	local_irq_enable();
    137	return r;
    138}
    139EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
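/*
 * Illustrative sketch, not part of this file: how a subarch entry loop is
 * expected to consume kvmppc_prepare_to_enter()'s return value. The caller
 * shape below is a placeholder; see booke.c and book3s_pr.c for the real
 * callers.
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;		// back to host, interrupts enabled again
 *	// r == 1: interrupts are hard-disabled and guest_enter_irqoff() has
 *	// been called, so it is safe to switch into guest context now.
 */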
    140
    141#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
    142static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
    143{
    144	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
    145	int i;
    146
    147	shared->sprg0 = swab64(shared->sprg0);
    148	shared->sprg1 = swab64(shared->sprg1);
    149	shared->sprg2 = swab64(shared->sprg2);
    150	shared->sprg3 = swab64(shared->sprg3);
    151	shared->srr0 = swab64(shared->srr0);
    152	shared->srr1 = swab64(shared->srr1);
    153	shared->dar = swab64(shared->dar);
    154	shared->msr = swab64(shared->msr);
    155	shared->dsisr = swab32(shared->dsisr);
    156	shared->int_pending = swab32(shared->int_pending);
    157	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
    158		shared->sr[i] = swab32(shared->sr[i]);
    159}
    160#endif
    161
    162int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
    163{
    164	int nr = kvmppc_get_gpr(vcpu, 11);
    165	int r;
    166	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
    167	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
    168	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
    169	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
    170	unsigned long r2 = 0;
    171
    172	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
    173		/* 32 bit mode */
    174		param1 &= 0xffffffff;
    175		param2 &= 0xffffffff;
    176		param3 &= 0xffffffff;
    177		param4 &= 0xffffffff;
    178	}
    179
    180	switch (nr) {
    181	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
    182	{
    183#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
    184		/* Book3S can be little endian, find it out here */
    185		int shared_big_endian = true;
    186		if (vcpu->arch.intr_msr & MSR_LE)
    187			shared_big_endian = false;
    188		if (shared_big_endian != vcpu->arch.shared_big_endian)
    189			kvmppc_swab_shared(vcpu);
    190		vcpu->arch.shared_big_endian = shared_big_endian;
    191#endif
    192
    193		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
    194			/*
    195			 * Older versions of the Linux magic page code had
    196			 * a bug where they would map their trampoline code
    197			 * NX. If that's the case, remove !PR NX capability.
    198			 */
    199			vcpu->arch.disable_kernel_nx = true;
    200			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
    201		}
    202
    203		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
    204		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
    205
    206#ifdef CONFIG_PPC_64K_PAGES
    207		/*
    208		 * Make sure our 4k magic page is in the same window of a 64k
    209		 * page within the guest and within the host's page.
    210		 */
    211		if ((vcpu->arch.magic_page_pa & 0xf000) !=
    212		    ((ulong)vcpu->arch.shared & 0xf000)) {
    213			void *old_shared = vcpu->arch.shared;
    214			ulong shared = (ulong)vcpu->arch.shared;
    215			void *new_shared;
    216
    217			shared &= PAGE_MASK;
    218			shared |= vcpu->arch.magic_page_pa & 0xf000;
    219			new_shared = (void*)shared;
    220			memcpy(new_shared, old_shared, 0x1000);
    221			vcpu->arch.shared = new_shared;
    222		}
    223#endif
    224
    225		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
    226
    227		r = EV_SUCCESS;
    228		break;
    229	}
    230	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
    231		r = EV_SUCCESS;
    232#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
    233		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
    234#endif
    235
    236		/* Second return value is in r4 */
    237		break;
    238	case EV_HCALL_TOKEN(EV_IDLE):
    239		r = EV_SUCCESS;
    240		kvm_vcpu_halt(vcpu);
    241		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
    242		break;
    243	default:
    244		r = EV_UNIMPLEMENTED;
    245		break;
    246	}
    247
    248	kvmppc_set_gpr(vcpu, 4, r2);
    249
    250	return r;
    251}
    252EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
    253
    254int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
    255{
    256	int r = false;
    257
    258	/* We have to know what CPU to virtualize */
    259	if (!vcpu->arch.pvr)
    260		goto out;
    261
    262	/* PAPR only works with book3s_64 */
    263	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
    264		goto out;
    265
    266	/* HV KVM can only do PAPR mode for now */
    267	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
    268		goto out;
    269
    270#ifdef CONFIG_KVM_BOOKE_HV
    271	if (!cpu_has_feature(CPU_FTR_EMB_HV))
    272		goto out;
    273#endif
    274
    275	r = true;
    276
    277out:
    278	vcpu->arch.sane = r;
    279	return r ? 0 : -EINVAL;
    280}
    281EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
    282
    283int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
    284{
    285	enum emulation_result er;
    286	int r;
    287
    288	er = kvmppc_emulate_loadstore(vcpu);
    289	switch (er) {
    290	case EMULATE_DONE:
    291		/* Future optimization: only reload non-volatiles if they were
    292		 * actually modified. */
    293		r = RESUME_GUEST_NV;
    294		break;
    295	case EMULATE_AGAIN:
    296		r = RESUME_GUEST;
    297		break;
    298	case EMULATE_DO_MMIO:
    299		vcpu->run->exit_reason = KVM_EXIT_MMIO;
    300		/* We must reload nonvolatiles because "update" load/store
    301		 * instructions modify register state. */
    302		/* Future optimization: only reload non-volatiles if they were
    303		 * actually modified. */
    304		r = RESUME_HOST_NV;
    305		break;
    306	case EMULATE_FAIL:
    307	{
    308		u32 last_inst;
    309
    310		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
    311		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
    312				      last_inst);
    313
    314		/*
    315		 * Injecting a Data Storage here is a bit more
    316		 * accurate since the instruction that caused the
    317		 * access could still be a valid one.
    318		 */
    319		if (!IS_ENABLED(CONFIG_BOOKE)) {
    320			ulong dsisr = DSISR_BADACCESS;
    321
    322			if (vcpu->mmio_is_write)
    323				dsisr |= DSISR_ISSTORE;
    324
    325			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
    326		} else {
    327			/*
    328			 * BookE does not send a SIGBUS on a bad
    329			 * fault, so use a Program interrupt instead
    330			 * to avoid a fault loop.
    331			 */
    332			kvmppc_core_queue_program(vcpu, 0);
    333		}
    334
    335		r = RESUME_GUEST;
    336		break;
    337	}
    338	default:
    339		WARN_ON(1);
    340		r = RESUME_GUEST;
    341	}
    342
    343	return r;
    344}
    345EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
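/*
 * Illustrative sketch, not part of this file: when EMULATE_DO_MMIO escalates
 * to a KVM_EXIT_MMIO exit, userspace services run->mmio (run being the
 * mmap'ed struct kvm_run) and calls KVM_RUN again; kvmppc_complete_mmio_load()
 * below then folds the data back into the saved target register. handle_mmio()
 * is a placeholder for the VMM's device model.
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		handle_mmio(run->mmio.phys_addr, run->mmio.data,
 *			    run->mmio.len, run->mmio.is_write);
 *		ioctl(vcpu_fd, KVM_RUN, 0);	// re-enter, completing the access
 *	}
 */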
    346
    347int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
    348	      bool data)
    349{
    350	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
    351	struct kvmppc_pte pte;
    352	int r = -EINVAL;
    353
    354	vcpu->stat.st++;
    355
    356	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
    357		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
    358							    size);
    359
    360	if ((!r) || (r == -EAGAIN))
    361		return r;
    362
    363	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
    364			 XLATE_WRITE, &pte);
    365	if (r < 0)
    366		return r;
    367
    368	*eaddr = pte.raddr;
    369
    370	if (!pte.may_write)
    371		return -EPERM;
    372
    373	/* Magic page override */
    374	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
    375	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
    376	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
    377		void *magic = vcpu->arch.shared;
    378		magic += pte.eaddr & 0xfff;
    379		memcpy(magic, ptr, size);
    380		return EMULATE_DONE;
    381	}
    382
    383	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
    384		return EMULATE_DO_MMIO;
    385
    386	return EMULATE_DONE;
    387}
    388EXPORT_SYMBOL_GPL(kvmppc_st);
    389
    390int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
    391		      bool data)
    392{
    393	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
    394	struct kvmppc_pte pte;
    395	int rc = -EINVAL;
    396
    397	vcpu->stat.ld++;
    398
    399	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
    400		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
    401							      size);
    402
    403	if ((!rc) || (rc == -EAGAIN))
    404		return rc;
    405
    406	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
    407			  XLATE_READ, &pte);
    408	if (rc)
    409		return rc;
    410
    411	*eaddr = pte.raddr;
    412
    413	if (!pte.may_read)
    414		return -EPERM;
    415
    416	if (!data && !pte.may_execute)
    417		return -ENOEXEC;
    418
    419	/* Magic page override */
    420	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
    421	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
    422	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
    423		void *magic = vcpu->arch.shared;
    424		magic += pte.eaddr & 0xfff;
    425		memcpy(ptr, magic, size);
    426		return EMULATE_DONE;
    427	}
    428
    429	kvm_vcpu_srcu_read_lock(vcpu);
    430	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
    431	kvm_vcpu_srcu_read_unlock(vcpu);
    432	if (rc)
    433		return EMULATE_DO_MMIO;
    434
    435	return EMULATE_DONE;
    436}
    437EXPORT_SYMBOL_GPL(kvmppc_ld);
    438
    439int kvm_arch_hardware_enable(void)
    440{
    441	return 0;
    442}
    443
    444int kvm_arch_hardware_setup(void *opaque)
    445{
    446	return 0;
    447}
    448
    449int kvm_arch_check_processor_compat(void *opaque)
    450{
    451	return kvmppc_core_check_processor_compat();
    452}
    453
    454int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
    455{
    456	struct kvmppc_ops *kvm_ops = NULL;
    457	int r;
    458
    459	/*
    460	 * if we have both HV and PR enabled, default is HV
    461	 */
    462	if (type == 0) {
    463		if (kvmppc_hv_ops)
    464			kvm_ops = kvmppc_hv_ops;
    465		else
    466			kvm_ops = kvmppc_pr_ops;
    467		if (!kvm_ops)
    468			goto err_out;
    469	} else	if (type == KVM_VM_PPC_HV) {
    470		if (!kvmppc_hv_ops)
    471			goto err_out;
    472		kvm_ops = kvmppc_hv_ops;
    473	} else if (type == KVM_VM_PPC_PR) {
    474		if (!kvmppc_pr_ops)
    475			goto err_out;
    476		kvm_ops = kvmppc_pr_ops;
    477	} else
    478		goto err_out;
    479
    480	if (!try_module_get(kvm_ops->owner))
    481		return -ENOENT;
    482
    483	kvm->arch.kvm_ops = kvm_ops;
    484	r = kvmppc_core_init_vm(kvm);
    485	if (r)
    486		module_put(kvm_ops->owner);
    487	return r;
    488err_out:
    489	return -EINVAL;
    490}
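/*
 * Illustrative sketch, not part of this file: 'type' above is the argument
 * userspace passes to KVM_CREATE_VM, so a VMM can force one implementation
 * (assuming /dev/kvm is available and error handling is omitted):
 *
 *	int kvm = open("/dev/kvm", O_RDWR);
 *	int vm  = ioctl(kvm, KVM_CREATE_VM, KVM_VM_PPC_PR);	// force PR
 *	// KVM_VM_PPC_HV forces HV; 0 picks the default chosen above
 *	// (HV if that module is loaded, PR otherwise).
 */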
    491
    492void kvm_arch_destroy_vm(struct kvm *kvm)
    493{
    494#ifdef CONFIG_KVM_XICS
    495	/*
    496	 * We call kick_all_cpus_sync() to ensure that all
    497	 * CPUs have executed any pending IPIs before we
    498	 * continue and free VCPUs structures below.
    499	 */
    500	if (is_kvmppc_hv_enabled(kvm))
    501		kick_all_cpus_sync();
    502#endif
    503
    504	kvm_destroy_vcpus(kvm);
    505
    506	mutex_lock(&kvm->lock);
    507
    508	kvmppc_core_destroy_vm(kvm);
    509
    510	mutex_unlock(&kvm->lock);
    511
    512	/* drop the module reference */
    513	module_put(kvm->arch.kvm_ops->owner);
    514}
    515
    516int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
    517{
    518	int r;
    519	/* Assume we're using HV mode when the HV module is loaded */
    520	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
    521
    522	if (kvm) {
    523		/*
    524		 * Hooray - we know which VM type we're running on. Depend on
    525		 * that rather than the guess above.
    526		 */
    527		hv_enabled = is_kvmppc_hv_enabled(kvm);
    528	}
    529
    530	switch (ext) {
    531#ifdef CONFIG_BOOKE
    532	case KVM_CAP_PPC_BOOKE_SREGS:
    533	case KVM_CAP_PPC_BOOKE_WATCHDOG:
    534	case KVM_CAP_PPC_EPR:
    535#else
    536	case KVM_CAP_PPC_SEGSTATE:
    537	case KVM_CAP_PPC_HIOR:
    538	case KVM_CAP_PPC_PAPR:
    539#endif
    540	case KVM_CAP_PPC_UNSET_IRQ:
    541	case KVM_CAP_PPC_IRQ_LEVEL:
    542	case KVM_CAP_ENABLE_CAP:
    543	case KVM_CAP_ONE_REG:
    544	case KVM_CAP_IOEVENTFD:
    545	case KVM_CAP_DEVICE_CTRL:
    546	case KVM_CAP_IMMEDIATE_EXIT:
    547	case KVM_CAP_SET_GUEST_DEBUG:
    548		r = 1;
    549		break;
    550	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
    551	case KVM_CAP_PPC_PAIRED_SINGLES:
    552	case KVM_CAP_PPC_OSI:
    553	case KVM_CAP_PPC_GET_PVINFO:
    554#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
    555	case KVM_CAP_SW_TLB:
    556#endif
    557		/* We support this only for PR */
    558		r = !hv_enabled;
    559		break;
    560#ifdef CONFIG_KVM_MPIC
    561	case KVM_CAP_IRQ_MPIC:
    562		r = 1;
    563		break;
    564#endif
    565
    566#ifdef CONFIG_PPC_BOOK3S_64
    567	case KVM_CAP_SPAPR_TCE:
    568	case KVM_CAP_SPAPR_TCE_64:
    569		r = 1;
    570		break;
    571	case KVM_CAP_SPAPR_TCE_VFIO:
    572		r = !!cpu_has_feature(CPU_FTR_HVMODE);
    573		break;
    574	case KVM_CAP_PPC_RTAS:
    575	case KVM_CAP_PPC_FIXUP_HCALL:
    576	case KVM_CAP_PPC_ENABLE_HCALL:
    577#ifdef CONFIG_KVM_XICS
    578	case KVM_CAP_IRQ_XICS:
    579#endif
    580	case KVM_CAP_PPC_GET_CPU_CHAR:
    581		r = 1;
    582		break;
    583#ifdef CONFIG_KVM_XIVE
    584	case KVM_CAP_PPC_IRQ_XIVE:
    585		/*
    586		 * We need XIVE to be enabled on the platform (implies
    587		 * a POWER9 processor) and the PowerNV platform, as
    588		 * nested is not yet supported.
    589		 */
    590		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
    591			kvmppc_xive_native_supported();
    592		break;
    593#endif
    594
    595	case KVM_CAP_PPC_ALLOC_HTAB:
    596		r = hv_enabled;
    597		break;
    598#endif /* CONFIG_PPC_BOOK3S_64 */
    599#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    600	case KVM_CAP_PPC_SMT:
    601		r = 0;
    602		if (kvm) {
    603			if (kvm->arch.emul_smt_mode > 1)
    604				r = kvm->arch.emul_smt_mode;
    605			else
    606				r = kvm->arch.smt_mode;
    607		} else if (hv_enabled) {
    608			if (cpu_has_feature(CPU_FTR_ARCH_300))
    609				r = 1;
    610			else
    611				r = threads_per_subcore;
    612		}
    613		break;
    614	case KVM_CAP_PPC_SMT_POSSIBLE:
    615		r = 1;
    616		if (hv_enabled) {
    617			if (!cpu_has_feature(CPU_FTR_ARCH_300))
    618				r = ((threads_per_subcore << 1) - 1);
    619			else
    620				/* P9 can emulate dbells, so allow any mode */
    621				r = 8 | 4 | 2 | 1;
    622		}
    623		break;
    624	case KVM_CAP_PPC_RMA:
    625		r = 0;
    626		break;
    627	case KVM_CAP_PPC_HWRNG:
    628		r = kvmppc_hwrng_present();
    629		break;
    630	case KVM_CAP_PPC_MMU_RADIX:
    631		r = !!(hv_enabled && radix_enabled());
    632		break;
    633	case KVM_CAP_PPC_MMU_HASH_V3:
    634		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
    635		       kvmppc_hv_ops->hash_v3_possible());
    636		break;
    637	case KVM_CAP_PPC_NESTED_HV:
    638		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
    639		       !kvmppc_hv_ops->enable_nested(NULL));
    640		break;
    641#endif
    642	case KVM_CAP_SYNC_MMU:
    643#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    644		r = hv_enabled;
    645#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
    646		r = 1;
    647#else
    648		r = 0;
    649#endif
    650		break;
    651#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    652	case KVM_CAP_PPC_HTAB_FD:
    653		r = hv_enabled;
    654		break;
    655#endif
    656	case KVM_CAP_NR_VCPUS:
    657		/*
    658		 * Recommending a number of CPUs is somewhat arbitrary; we
    659		 * return the number of present CPUs for -HV (since a host
    660		 * will have secondary threads "offline"), and for other KVM
    661		 * implementations just count online CPUs.
    662		 */
    663		if (hv_enabled)
    664			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
    665		else
    666			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
    667		break;
    668	case KVM_CAP_MAX_VCPUS:
    669		r = KVM_MAX_VCPUS;
    670		break;
    671	case KVM_CAP_MAX_VCPU_ID:
    672		r = KVM_MAX_VCPU_IDS;
    673		break;
    674#ifdef CONFIG_PPC_BOOK3S_64
    675	case KVM_CAP_PPC_GET_SMMU_INFO:
    676		r = 1;
    677		break;
    678	case KVM_CAP_SPAPR_MULTITCE:
    679		r = 1;
    680		break;
    681	case KVM_CAP_SPAPR_RESIZE_HPT:
    682		r = !!hv_enabled;
    683		break;
    684#endif
    685#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    686	case KVM_CAP_PPC_FWNMI:
    687		r = hv_enabled;
    688		break;
    689#endif
    690#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    691	case KVM_CAP_PPC_HTM:
    692		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
    693		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
    694		break;
    695#endif
    696#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
    697	case KVM_CAP_PPC_SECURE_GUEST:
    698		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
    699			!kvmppc_hv_ops->enable_svm(NULL);
    700		break;
    701	case KVM_CAP_PPC_DAWR1:
    702		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
    703		       !kvmppc_hv_ops->enable_dawr1(NULL));
    704		break;
    705	case KVM_CAP_PPC_RPT_INVALIDATE:
    706		r = 1;
    707		break;
    708#endif
    709	case KVM_CAP_PPC_AIL_MODE_3:
    710		r = 0;
    711		/*
    712		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
    713		 * The POWER9s can support it if the guest runs in hash mode,
    714		 * but QEMU doesn't necessarily query the capability in time.
    715		 */
    716		if (hv_enabled) {
    717			if (kvmhv_on_pseries()) {
    718				if (pseries_reloc_on_exception())
    719					r = 1;
    720			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
    721				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
    722				r = 1;
    723			}
    724		}
    725		break;
    726	default:
    727		r = 0;
    728		break;
    729	}
    730	return r;
    731
    732}
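/*
 * Illustrative sketch, not part of this file: the values computed above are
 * what userspace sees through KVM_CHECK_EXTENSION, e.g. probing the SMT mode
 * of an existing VM (vm_fd assumed to be an open VM file descriptor):
 *
 *	int smt = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *	// 0 means unsupported, otherwise the VM's (possibly emulated) SMT mode
 */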
    733
    734long kvm_arch_dev_ioctl(struct file *filp,
    735                        unsigned int ioctl, unsigned long arg)
    736{
    737	return -EINVAL;
    738}
    739
    740void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
    741{
    742	kvmppc_core_free_memslot(kvm, slot);
    743}
    744
    745int kvm_arch_prepare_memory_region(struct kvm *kvm,
    746				   const struct kvm_memory_slot *old,
    747				   struct kvm_memory_slot *new,
    748				   enum kvm_mr_change change)
    749{
    750	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
    751}
    752
    753void kvm_arch_commit_memory_region(struct kvm *kvm,
    754				   struct kvm_memory_slot *old,
    755				   const struct kvm_memory_slot *new,
    756				   enum kvm_mr_change change)
    757{
    758	kvmppc_core_commit_memory_region(kvm, old, new, change);
    759}
    760
    761void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
    762				   struct kvm_memory_slot *slot)
    763{
    764	kvmppc_core_flush_memslot(kvm, slot);
    765}
    766
    767int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
    768{
    769	return 0;
    770}
    771
    772static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
    773{
    774	struct kvm_vcpu *vcpu;
    775
    776	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
    777	kvmppc_decrementer_func(vcpu);
    778
    779	return HRTIMER_NORESTART;
    780}
    781
    782int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
    783{
    784	int err;
    785
    786	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
    787	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
    788	vcpu->arch.dec_expires = get_tb();
    789
    790#ifdef CONFIG_KVM_EXIT_TIMING
    791	mutex_init(&vcpu->arch.exit_timing_lock);
    792#endif
    793	err = kvmppc_subarch_vcpu_init(vcpu);
    794	if (err)
    795		return err;
    796
    797	err = kvmppc_core_vcpu_create(vcpu);
    798	if (err)
    799		goto out_vcpu_uninit;
    800
    801	rcuwait_init(&vcpu->arch.wait);
    802	vcpu->arch.waitp = &vcpu->arch.wait;
    803	return 0;
    804
    805out_vcpu_uninit:
    806	kvmppc_subarch_vcpu_uninit(vcpu);
    807	return err;
    808}
    809
    810void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
    811{
    812}
    813
    814void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
    815{
    816	/* Make sure we're not using the vcpu anymore */
    817	hrtimer_cancel(&vcpu->arch.dec_timer);
    818
    819	switch (vcpu->arch.irq_type) {
    820	case KVMPPC_IRQ_MPIC:
    821		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
    822		break;
    823	case KVMPPC_IRQ_XICS:
    824		if (xics_on_xive())
    825			kvmppc_xive_cleanup_vcpu(vcpu);
    826		else
    827			kvmppc_xics_free_icp(vcpu);
    828		break;
    829	case KVMPPC_IRQ_XIVE:
    830		kvmppc_xive_native_cleanup_vcpu(vcpu);
    831		break;
    832	}
    833
    834	kvmppc_core_vcpu_free(vcpu);
    835
    836	kvmppc_subarch_vcpu_uninit(vcpu);
    837}
    838
    839int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
    840{
    841	return kvmppc_core_pending_dec(vcpu);
    842}
    843
    844void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
    845{
    846#ifdef CONFIG_BOOKE
    847	/*
    848	 * vrsave (formerly usprg0) isn't used by Linux, but may
    849	 * be used by the guest.
    850	 *
    851	 * On non-booke this is associated with Altivec and
    852	 * is handled by code in book3s.c.
    853	 */
    854	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
    855#endif
    856	kvmppc_core_vcpu_load(vcpu, cpu);
    857}
    858
    859void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
    860{
    861	kvmppc_core_vcpu_put(vcpu);
    862#ifdef CONFIG_BOOKE
    863	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
    864#endif
    865}
    866
    867/*
    868 * irq_bypass_add_producer and irq_bypass_del_producer are only
    869 * useful if the architecture supports PCI passthrough.
    870 * irq_bypass_stop and irq_bypass_start are not needed and so
    871 * kvm_ops are not defined for them.
    872 */
    873bool kvm_arch_has_irq_bypass(void)
    874{
    875	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
    876		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
    877}
    878
    879int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
    880				     struct irq_bypass_producer *prod)
    881{
    882	struct kvm_kernel_irqfd *irqfd =
    883		container_of(cons, struct kvm_kernel_irqfd, consumer);
    884	struct kvm *kvm = irqfd->kvm;
    885
    886	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
    887		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
    888
    889	return 0;
    890}
    891
    892void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
    893				      struct irq_bypass_producer *prod)
    894{
    895	struct kvm_kernel_irqfd *irqfd =
    896		container_of(cons, struct kvm_kernel_irqfd, consumer);
    897	struct kvm *kvm = irqfd->kvm;
    898
    899	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
    900		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
    901}
    902
    903#ifdef CONFIG_VSX
    904static inline int kvmppc_get_vsr_dword_offset(int index)
    905{
    906	int offset;
    907
    908	if ((index != 0) && (index != 1))
    909		return -1;
    910
    911#ifdef __BIG_ENDIAN
    912	offset =  index;
    913#else
    914	offset = 1 - index;
    915#endif
    916
    917	return offset;
    918}
    919
    920static inline int kvmppc_get_vsr_word_offset(int index)
    921{
    922	int offset;
    923
    924	if ((index > 3) || (index < 0))
    925		return -1;
    926
    927#ifdef __BIG_ENDIAN
    928	offset = index;
    929#else
    930	offset = 3 - index;
    931#endif
    932	return offset;
    933}
    934
    935static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
    936	u64 gpr)
    937{
    938	union kvmppc_one_reg val;
    939	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
    940	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
    941
    942	if (offset == -1)
    943		return;
    944
    945	if (index >= 32) {
    946		val.vval = VCPU_VSX_VR(vcpu, index - 32);
    947		val.vsxval[offset] = gpr;
    948		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
    949	} else {
    950		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
    951	}
    952}
    953
    954static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
    955	u64 gpr)
    956{
    957	union kvmppc_one_reg val;
    958	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
    959
    960	if (index >= 32) {
    961		val.vval = VCPU_VSX_VR(vcpu, index - 32);
    962		val.vsxval[0] = gpr;
    963		val.vsxval[1] = gpr;
    964		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
    965	} else {
    966		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
    967		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
    968	}
    969}
    970
    971static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
    972	u32 gpr)
    973{
    974	union kvmppc_one_reg val;
    975	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
    976
    977	if (index >= 32) {
    978		val.vsx32val[0] = gpr;
    979		val.vsx32val[1] = gpr;
    980		val.vsx32val[2] = gpr;
    981		val.vsx32val[3] = gpr;
    982		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
    983	} else {
    984		val.vsx32val[0] = gpr;
    985		val.vsx32val[1] = gpr;
    986		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
    987		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
    988	}
    989}
    990
    991static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
    992	u32 gpr32)
    993{
    994	union kvmppc_one_reg val;
    995	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
    996	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
    997	int dword_offset, word_offset;
    998
    999	if (offset == -1)
   1000		return;
   1001
   1002	if (index >= 32) {
   1003		val.vval = VCPU_VSX_VR(vcpu, index - 32);
   1004		val.vsx32val[offset] = gpr32;
   1005		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
   1006	} else {
   1007		dword_offset = offset / 2;
   1008		word_offset = offset % 2;
   1009		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
   1010		val.vsx32val[word_offset] = gpr32;
   1011		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
   1012	}
   1013}
   1014#endif /* CONFIG_VSX */
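/*
 * Note, not part of this file: the offset helpers above translate the
 * architected element index used by the MMIO code into the matching slot of
 * the kvmppc_one_reg union on both big- and little-endian hosts; on a
 * little-endian host, for example, kvmppc_get_vsr_dword_offset(0) returns 1,
 * so element 0 is taken from the other half of the in-memory array.
 */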
   1015
   1016#ifdef CONFIG_ALTIVEC
   1017static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
   1018		int index, int element_size)
   1019{
   1020	int offset;
   1021	int elts = sizeof(vector128)/element_size;
   1022
   1023	if ((index < 0) || (index >= elts))
   1024		return -1;
   1025
   1026	if (kvmppc_need_byteswap(vcpu))
   1027		offset = elts - index - 1;
   1028	else
   1029		offset = index;
   1030
   1031	return offset;
   1032}
   1033
   1034static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
   1035		int index)
   1036{
   1037	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
   1038}
   1039
   1040static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
   1041		int index)
   1042{
   1043	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
   1044}
   1045
   1046static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
   1047		int index)
   1048{
   1049	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
   1050}
   1051
   1052static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
   1053		int index)
   1054{
   1055	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
   1056}
   1057
   1058
   1059static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
   1060	u64 gpr)
   1061{
   1062	union kvmppc_one_reg val;
   1063	int offset = kvmppc_get_vmx_dword_offset(vcpu,
   1064			vcpu->arch.mmio_vmx_offset);
   1065	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
   1066
   1067	if (offset == -1)
   1068		return;
   1069
   1070	val.vval = VCPU_VSX_VR(vcpu, index);
   1071	val.vsxval[offset] = gpr;
   1072	VCPU_VSX_VR(vcpu, index) = val.vval;
   1073}
   1074
   1075static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
   1076	u32 gpr32)
   1077{
   1078	union kvmppc_one_reg val;
   1079	int offset = kvmppc_get_vmx_word_offset(vcpu,
   1080			vcpu->arch.mmio_vmx_offset);
   1081	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
   1082
   1083	if (offset == -1)
   1084		return;
   1085
   1086	val.vval = VCPU_VSX_VR(vcpu, index);
   1087	val.vsx32val[offset] = gpr32;
   1088	VCPU_VSX_VR(vcpu, index) = val.vval;
   1089}
   1090
   1091static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
   1092	u16 gpr16)
   1093{
   1094	union kvmppc_one_reg val;
   1095	int offset = kvmppc_get_vmx_hword_offset(vcpu,
   1096			vcpu->arch.mmio_vmx_offset);
   1097	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
   1098
   1099	if (offset == -1)
   1100		return;
   1101
   1102	val.vval = VCPU_VSX_VR(vcpu, index);
   1103	val.vsx16val[offset] = gpr16;
   1104	VCPU_VSX_VR(vcpu, index) = val.vval;
   1105}
   1106
   1107static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
   1108	u8 gpr8)
   1109{
   1110	union kvmppc_one_reg val;
   1111	int offset = kvmppc_get_vmx_byte_offset(vcpu,
   1112			vcpu->arch.mmio_vmx_offset);
   1113	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
   1114
   1115	if (offset == -1)
   1116		return;
   1117
   1118	val.vval = VCPU_VSX_VR(vcpu, index);
   1119	val.vsx8val[offset] = gpr8;
   1120	VCPU_VSX_VR(vcpu, index) = val.vval;
   1121}
   1122#endif /* CONFIG_ALTIVEC */
   1123
   1124#ifdef CONFIG_PPC_FPU
   1125static inline u64 sp_to_dp(u32 fprs)
   1126{
   1127	u64 fprd;
   1128
   1129	preempt_disable();
   1130	enable_kernel_fp();
   1131	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
   1132	     : "fr0");
   1133	preempt_enable();
   1134	return fprd;
   1135}
   1136
   1137static inline u32 dp_to_sp(u64 fprd)
   1138{
   1139	u32 fprs;
   1140
   1141	preempt_disable();
   1142	enable_kernel_fp();
   1143	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
   1144	     : "fr0");
   1145	preempt_enable();
   1146	return fprs;
   1147}
   1148
   1149#else
   1150#define sp_to_dp(x)	(x)
   1151#define dp_to_sp(x)	(x)
   1152#endif /* CONFIG_PPC_FPU */
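/*
 * Note, not part of this file: sp_to_dp()/dp_to_sp() let the FPU itself do
 * the format conversion (lfs widens a single to the register's double format,
 * stfd stores it back out, and the reverse for dp_to_sp). Ignoring NaN and
 * denormal corner cases, a plain-C approximation of sp_to_dp() would be:
 *
 *	union { u32 u; float  f; } s = { .u = fprs };
 *	union { u64 u; double d; } d = { .d = s.f };
 *	return d.u;
 */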
   1153
   1154static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
   1155{
   1156	struct kvm_run *run = vcpu->run;
   1157	u64 gpr;
   1158
   1159	if (run->mmio.len > sizeof(gpr))
   1160		return;
   1161
   1162	if (!vcpu->arch.mmio_host_swabbed) {
   1163		switch (run->mmio.len) {
   1164		case 8: gpr = *(u64 *)run->mmio.data; break;
   1165		case 4: gpr = *(u32 *)run->mmio.data; break;
   1166		case 2: gpr = *(u16 *)run->mmio.data; break;
   1167		case 1: gpr = *(u8 *)run->mmio.data; break;
   1168		}
   1169	} else {
   1170		switch (run->mmio.len) {
   1171		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
   1172		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
   1173		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
   1174		case 1: gpr = *(u8 *)run->mmio.data; break;
   1175		}
   1176	}
   1177
   1178	/* conversion between single and double precision */
   1179	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
   1180		gpr = sp_to_dp(gpr);
   1181
   1182	if (vcpu->arch.mmio_sign_extend) {
   1183		switch (run->mmio.len) {
   1184#ifdef CONFIG_PPC64
   1185		case 4:
   1186			gpr = (s64)(s32)gpr;
   1187			break;
   1188#endif
   1189		case 2:
   1190			gpr = (s64)(s16)gpr;
   1191			break;
   1192		case 1:
   1193			gpr = (s64)(s8)gpr;
   1194			break;
   1195		}
   1196	}
   1197
   1198	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
   1199	case KVM_MMIO_REG_GPR:
   1200		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
   1201		break;
   1202	case KVM_MMIO_REG_FPR:
   1203		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
   1204			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
   1205
   1206		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
   1207		break;
   1208#ifdef CONFIG_PPC_BOOK3S
   1209	case KVM_MMIO_REG_QPR:
   1210		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
   1211		break;
   1212	case KVM_MMIO_REG_FQPR:
   1213		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
   1214		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
   1215		break;
   1216#endif
   1217#ifdef CONFIG_VSX
   1218	case KVM_MMIO_REG_VSX:
   1219		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
   1220			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
   1221
   1222		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
   1223			kvmppc_set_vsr_dword(vcpu, gpr);
   1224		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
   1225			kvmppc_set_vsr_word(vcpu, gpr);
   1226		else if (vcpu->arch.mmio_copy_type ==
   1227				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
   1228			kvmppc_set_vsr_dword_dump(vcpu, gpr);
   1229		else if (vcpu->arch.mmio_copy_type ==
   1230				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
   1231			kvmppc_set_vsr_word_dump(vcpu, gpr);
   1232		break;
   1233#endif
   1234#ifdef CONFIG_ALTIVEC
   1235	case KVM_MMIO_REG_VMX:
   1236		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
   1237			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
   1238
   1239		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
   1240			kvmppc_set_vmx_dword(vcpu, gpr);
   1241		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
   1242			kvmppc_set_vmx_word(vcpu, gpr);
   1243		else if (vcpu->arch.mmio_copy_type ==
   1244				KVMPPC_VMX_COPY_HWORD)
   1245			kvmppc_set_vmx_hword(vcpu, gpr);
   1246		else if (vcpu->arch.mmio_copy_type ==
   1247				KVMPPC_VMX_COPY_BYTE)
   1248			kvmppc_set_vmx_byte(vcpu, gpr);
   1249		break;
   1250#endif
   1251#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
   1252	case KVM_MMIO_REG_NESTED_GPR:
   1253		if (kvmppc_need_byteswap(vcpu))
   1254			gpr = swab64(gpr);
   1255		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
   1256				     sizeof(gpr));
   1257		break;
   1258#endif
   1259	default:
   1260		BUG();
   1261	}
   1262}
   1263
   1264static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
   1265				unsigned int rt, unsigned int bytes,
   1266				int is_default_endian, int sign_extend)
   1267{
   1268	struct kvm_run *run = vcpu->run;
   1269	int idx, ret;
   1270	bool host_swabbed;
   1271
   1272	/* Pity C doesn't have a logical XOR operator */
   1273	if (kvmppc_need_byteswap(vcpu)) {
   1274		host_swabbed = is_default_endian;
   1275	} else {
   1276		host_swabbed = !is_default_endian;
   1277	}
   1278
   1279	if (bytes > sizeof(run->mmio.data))
   1280		return EMULATE_FAIL;
   1281
   1282	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
   1283	run->mmio.len = bytes;
   1284	run->mmio.is_write = 0;
   1285
   1286	vcpu->arch.io_gpr = rt;
   1287	vcpu->arch.mmio_host_swabbed = host_swabbed;
   1288	vcpu->mmio_needed = 1;
   1289	vcpu->mmio_is_write = 0;
   1290	vcpu->arch.mmio_sign_extend = sign_extend;
   1291
   1292	idx = srcu_read_lock(&vcpu->kvm->srcu);
   1293
   1294	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
   1295			      bytes, &run->mmio.data);
   1296
   1297	srcu_read_unlock(&vcpu->kvm->srcu, idx);
   1298
   1299	if (!ret) {
   1300		kvmppc_complete_mmio_load(vcpu);
   1301		vcpu->mmio_needed = 0;
   1302		return EMULATE_DONE;
   1303	}
   1304
   1305	return EMULATE_DO_MMIO;
   1306}
   1307
   1308int kvmppc_handle_load(struct kvm_vcpu *vcpu,
   1309		       unsigned int rt, unsigned int bytes,
   1310		       int is_default_endian)
   1311{
   1312	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
   1313}
   1314EXPORT_SYMBOL_GPL(kvmppc_handle_load);
   1315
   1316/* Same as above, but sign extends */
   1317int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
   1318			unsigned int rt, unsigned int bytes,
   1319			int is_default_endian)
   1320{
   1321	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
   1322}
   1323
   1324#ifdef CONFIG_VSX
   1325int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
   1326			unsigned int rt, unsigned int bytes,
   1327			int is_default_endian, int mmio_sign_extend)
   1328{
   1329	enum emulation_result emulated = EMULATE_DONE;
   1330
   1331	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
   1332	if (vcpu->arch.mmio_vsx_copy_nums > 4)
   1333		return EMULATE_FAIL;
   1334
   1335	while (vcpu->arch.mmio_vsx_copy_nums) {
   1336		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
   1337			is_default_endian, mmio_sign_extend);
   1338
   1339		if (emulated != EMULATE_DONE)
   1340			break;
   1341
   1342		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
   1343
   1344		vcpu->arch.mmio_vsx_copy_nums--;
   1345		vcpu->arch.mmio_vsx_offset++;
   1346	}
   1347	return emulated;
   1348}
   1349#endif /* CONFIG_VSX */
   1350
   1351int kvmppc_handle_store(struct kvm_vcpu *vcpu,
   1352			u64 val, unsigned int bytes, int is_default_endian)
   1353{
   1354	struct kvm_run *run = vcpu->run;
   1355	void *data = run->mmio.data;
   1356	int idx, ret;
   1357	bool host_swabbed;
   1358
   1359	/* Pity C doesn't have a logical XOR operator */
   1360	if (kvmppc_need_byteswap(vcpu)) {
   1361		host_swabbed = is_default_endian;
   1362	} else {
   1363		host_swabbed = !is_default_endian;
   1364	}
   1365
   1366	if (bytes > sizeof(run->mmio.data))
   1367		return EMULATE_FAIL;
   1368
   1369	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
   1370	run->mmio.len = bytes;
   1371	run->mmio.is_write = 1;
   1372	vcpu->mmio_needed = 1;
   1373	vcpu->mmio_is_write = 1;
   1374
   1375	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
   1376		val = dp_to_sp(val);
   1377
   1378	/* Store the value at the lowest bytes in 'data'. */
   1379	if (!host_swabbed) {
   1380		switch (bytes) {
   1381		case 8: *(u64 *)data = val; break;
   1382		case 4: *(u32 *)data = val; break;
   1383		case 2: *(u16 *)data = val; break;
   1384		case 1: *(u8  *)data = val; break;
   1385		}
   1386	} else {
   1387		switch (bytes) {
   1388		case 8: *(u64 *)data = swab64(val); break;
   1389		case 4: *(u32 *)data = swab32(val); break;
   1390		case 2: *(u16 *)data = swab16(val); break;
   1391		case 1: *(u8  *)data = val; break;
   1392		}
   1393	}
   1394
   1395	idx = srcu_read_lock(&vcpu->kvm->srcu);
   1396
   1397	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
   1398			       bytes, &run->mmio.data);
   1399
   1400	srcu_read_unlock(&vcpu->kvm->srcu, idx);
   1401
   1402	if (!ret) {
   1403		vcpu->mmio_needed = 0;
   1404		return EMULATE_DONE;
   1405	}
   1406
   1407	return EMULATE_DO_MMIO;
   1408}
   1409EXPORT_SYMBOL_GPL(kvmppc_handle_store);
   1410
   1411#ifdef CONFIG_VSX
   1412static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
   1413{
   1414	u32 dword_offset, word_offset;
   1415	union kvmppc_one_reg reg;
   1416	int vsx_offset = 0;
   1417	int copy_type = vcpu->arch.mmio_copy_type;
   1418	int result = 0;
   1419
   1420	switch (copy_type) {
   1421	case KVMPPC_VSX_COPY_DWORD:
   1422		vsx_offset =
   1423			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
   1424
   1425		if (vsx_offset == -1) {
   1426			result = -1;
   1427			break;
   1428		}
   1429
   1430		if (rs < 32) {
   1431			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
   1432		} else {
   1433			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
   1434			*val = reg.vsxval[vsx_offset];
   1435		}
   1436		break;
   1437
   1438	case KVMPPC_VSX_COPY_WORD:
   1439		vsx_offset =
   1440			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
   1441
   1442		if (vsx_offset == -1) {
   1443			result = -1;
   1444			break;
   1445		}
   1446
   1447		if (rs < 32) {
   1448			dword_offset = vsx_offset / 2;
   1449			word_offset = vsx_offset % 2;
   1450			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
   1451			*val = reg.vsx32val[word_offset];
   1452		} else {
   1453			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
   1454			*val = reg.vsx32val[vsx_offset];
   1455		}
   1456		break;
   1457
   1458	default:
   1459		result = -1;
   1460		break;
   1461	}
   1462
   1463	return result;
   1464}
   1465
   1466int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
   1467			int rs, unsigned int bytes, int is_default_endian)
   1468{
   1469	u64 val;
   1470	enum emulation_result emulated = EMULATE_DONE;
   1471
   1472	vcpu->arch.io_gpr = rs;
   1473
   1474	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
   1475	if (vcpu->arch.mmio_vsx_copy_nums > 4)
   1476		return EMULATE_FAIL;
   1477
   1478	while (vcpu->arch.mmio_vsx_copy_nums) {
   1479		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
   1480			return EMULATE_FAIL;
   1481
   1482		emulated = kvmppc_handle_store(vcpu,
   1483			 val, bytes, is_default_endian);
   1484
   1485		if (emulated != EMULATE_DONE)
   1486			break;
   1487
   1488		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
   1489
   1490		vcpu->arch.mmio_vsx_copy_nums--;
   1491		vcpu->arch.mmio_vsx_offset++;
   1492	}
   1493
   1494	return emulated;
   1495}
   1496
   1497static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
   1498{
   1499	struct kvm_run *run = vcpu->run;
   1500	enum emulation_result emulated = EMULATE_FAIL;
   1501	int r;
   1502
   1503	vcpu->arch.paddr_accessed += run->mmio.len;
   1504
   1505	if (!vcpu->mmio_is_write) {
   1506		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
   1507			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
   1508	} else {
   1509		emulated = kvmppc_handle_vsx_store(vcpu,
   1510			 vcpu->arch.io_gpr, run->mmio.len, 1);
   1511	}
   1512
   1513	switch (emulated) {
   1514	case EMULATE_DO_MMIO:
   1515		run->exit_reason = KVM_EXIT_MMIO;
   1516		r = RESUME_HOST;
   1517		break;
   1518	case EMULATE_FAIL:
   1519		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
   1520		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
   1521		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
   1522		r = RESUME_HOST;
   1523		break;
   1524	default:
   1525		r = RESUME_GUEST;
   1526		break;
   1527	}
   1528	return r;
   1529}
   1530#endif /* CONFIG_VSX */
   1531
   1532#ifdef CONFIG_ALTIVEC
   1533int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
   1534		unsigned int rt, unsigned int bytes, int is_default_endian)
   1535{
   1536	enum emulation_result emulated = EMULATE_DONE;
   1537
   1538	if (vcpu->arch.mmio_vmx_copy_nums > 2)
   1539		return EMULATE_FAIL;
   1540
   1541	while (vcpu->arch.mmio_vmx_copy_nums) {
   1542		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
   1543				is_default_endian, 0);
   1544
   1545		if (emulated != EMULATE_DONE)
   1546			break;
   1547
   1548		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
   1549		vcpu->arch.mmio_vmx_copy_nums--;
   1550		vcpu->arch.mmio_vmx_offset++;
   1551	}
   1552
   1553	return emulated;
   1554}
   1555
   1556static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
   1557{
   1558	union kvmppc_one_reg reg;
   1559	int vmx_offset = 0;
   1560	int result = 0;
   1561
   1562	vmx_offset =
   1563		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
   1564
   1565	if (vmx_offset == -1)
   1566		return -1;
   1567
   1568	reg.vval = VCPU_VSX_VR(vcpu, index);
   1569	*val = reg.vsxval[vmx_offset];
   1570
   1571	return result;
   1572}
   1573
   1574static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
   1575{
   1576	union kvmppc_one_reg reg;
   1577	int vmx_offset = 0;
   1578	int result = 0;
   1579
   1580	vmx_offset =
   1581		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
   1582
   1583	if (vmx_offset == -1)
   1584		return -1;
   1585
   1586	reg.vval = VCPU_VSX_VR(vcpu, index);
   1587	*val = reg.vsx32val[vmx_offset];
   1588
   1589	return result;
   1590}
   1591
   1592static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
   1593{
   1594	union kvmppc_one_reg reg;
   1595	int vmx_offset = 0;
   1596	int result = 0;
   1597
   1598	vmx_offset =
   1599		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
   1600
   1601	if (vmx_offset == -1)
   1602		return -1;
   1603
   1604	reg.vval = VCPU_VSX_VR(vcpu, index);
   1605	*val = reg.vsx16val[vmx_offset];
   1606
   1607	return result;
   1608}
   1609
   1610static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
   1611{
   1612	union kvmppc_one_reg reg;
   1613	int vmx_offset = 0;
   1614	int result = 0;
   1615
   1616	vmx_offset =
   1617		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
   1618
   1619	if (vmx_offset == -1)
   1620		return -1;
   1621
   1622	reg.vval = VCPU_VSX_VR(vcpu, index);
   1623	*val = reg.vsx8val[vmx_offset];
   1624
   1625	return result;
   1626}
   1627
   1628int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
   1629		unsigned int rs, unsigned int bytes, int is_default_endian)
   1630{
   1631	u64 val = 0;
   1632	unsigned int index = rs & KVM_MMIO_REG_MASK;
   1633	enum emulation_result emulated = EMULATE_DONE;
   1634
   1635	if (vcpu->arch.mmio_vmx_copy_nums > 2)
   1636		return EMULATE_FAIL;
   1637
   1638	vcpu->arch.io_gpr = rs;
   1639
   1640	while (vcpu->arch.mmio_vmx_copy_nums) {
   1641		switch (vcpu->arch.mmio_copy_type) {
   1642		case KVMPPC_VMX_COPY_DWORD:
   1643			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
   1644				return EMULATE_FAIL;
   1645
   1646			break;
   1647		case KVMPPC_VMX_COPY_WORD:
   1648			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
   1649				return EMULATE_FAIL;
   1650			break;
   1651		case KVMPPC_VMX_COPY_HWORD:
   1652			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
   1653				return EMULATE_FAIL;
   1654			break;
   1655		case KVMPPC_VMX_COPY_BYTE:
   1656			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
   1657				return EMULATE_FAIL;
   1658			break;
   1659		default:
   1660			return EMULATE_FAIL;
   1661		}
   1662
   1663		emulated = kvmppc_handle_store(vcpu, val, bytes,
   1664				is_default_endian);
   1665		if (emulated != EMULATE_DONE)
   1666			break;
   1667
   1668		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
   1669		vcpu->arch.mmio_vmx_copy_nums--;
   1670		vcpu->arch.mmio_vmx_offset++;
   1671	}
   1672
   1673	return emulated;
   1674}
   1675
   1676static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
   1677{
   1678	struct kvm_run *run = vcpu->run;
   1679	enum emulation_result emulated = EMULATE_FAIL;
   1680	int r;
   1681
   1682	vcpu->arch.paddr_accessed += run->mmio.len;
   1683
   1684	if (!vcpu->mmio_is_write) {
   1685		emulated = kvmppc_handle_vmx_load(vcpu,
   1686				vcpu->arch.io_gpr, run->mmio.len, 1);
   1687	} else {
   1688		emulated = kvmppc_handle_vmx_store(vcpu,
   1689				vcpu->arch.io_gpr, run->mmio.len, 1);
   1690	}
   1691
   1692	switch (emulated) {
   1693	case EMULATE_DO_MMIO:
   1694		run->exit_reason = KVM_EXIT_MMIO;
   1695		r = RESUME_HOST;
   1696		break;
   1697	case EMULATE_FAIL:
   1698		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
   1699		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
   1700		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
   1701		r = RESUME_HOST;
   1702		break;
   1703	default:
   1704		r = RESUME_GUEST;
   1705		break;
   1706	}
   1707	return r;
   1708}
   1709#endif /* CONFIG_ALTIVEC */
   1710
   1711int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
   1712{
   1713	int r = 0;
   1714	union kvmppc_one_reg val;
   1715	int size;
   1716
   1717	size = one_reg_size(reg->id);
   1718	if (size > sizeof(val))
   1719		return -EINVAL;
   1720
   1721	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
   1722	if (r == -EINVAL) {
   1723		r = 0;
   1724		switch (reg->id) {
   1725#ifdef CONFIG_ALTIVEC
   1726		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
   1727			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
   1728				r = -ENXIO;
   1729				break;
   1730			}
   1731			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
   1732			break;
   1733		case KVM_REG_PPC_VSCR:
   1734			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
   1735				r = -ENXIO;
   1736				break;
   1737			}
   1738			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
   1739			break;
   1740		case KVM_REG_PPC_VRSAVE:
   1741			val = get_reg_val(reg->id, vcpu->arch.vrsave);
   1742			break;
   1743#endif /* CONFIG_ALTIVEC */
   1744		default:
   1745			r = -EINVAL;
   1746			break;
   1747		}
   1748	}
   1749
   1750	if (r)
   1751		return r;
   1752
   1753	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
   1754		r = -EFAULT;
   1755
   1756	return r;
   1757}
   1758
   1759int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
   1760{
   1761	int r;
   1762	union kvmppc_one_reg val;
   1763	int size;
   1764
   1765	size = one_reg_size(reg->id);
   1766	if (size > sizeof(val))
   1767		return -EINVAL;
   1768
   1769	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
   1770		return -EFAULT;
   1771
   1772	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
   1773	if (r == -EINVAL) {
   1774		r = 0;
   1775		switch (reg->id) {
   1776#ifdef CONFIG_ALTIVEC
   1777		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
   1778			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
   1779				r = -ENXIO;
   1780				break;
   1781			}
   1782			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
   1783			break;
   1784		case KVM_REG_PPC_VSCR:
   1785			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
   1786				r = -ENXIO;
   1787				break;
   1788			}
   1789			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
   1790			break;
   1791		case KVM_REG_PPC_VRSAVE:
   1792			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
   1793				r = -ENXIO;
   1794				break;
   1795			}
   1796			vcpu->arch.vrsave = set_reg_val(reg->id, val);
   1797			break;
   1798#endif /* CONFIG_ALTIVEC */
   1799		default:
   1800			r = -EINVAL;
   1801			break;
   1802		}
   1803	}
   1804
   1805	return r;
   1806}
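/*
 * Illustrative sketch, not part of this file: both handlers above sit behind
 * the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls. Reading VRSAVE (a 32-bit
 * register) from userspace, with vcpu_fd assumed open and errors ignored:
 *
 *	u32 vrsave;
 *	struct kvm_one_reg r = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (uintptr_t)&vrsave,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);
 */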
   1807
   1808int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
   1809{
   1810	struct kvm_run *run = vcpu->run;
   1811	int r;
   1812
   1813	vcpu_load(vcpu);
   1814
   1815	if (vcpu->mmio_needed) {
   1816		vcpu->mmio_needed = 0;
   1817		if (!vcpu->mmio_is_write)
   1818			kvmppc_complete_mmio_load(vcpu);
   1819#ifdef CONFIG_VSX
   1820		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
   1821			vcpu->arch.mmio_vsx_copy_nums--;
   1822			vcpu->arch.mmio_vsx_offset++;
   1823		}
   1824
   1825		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
   1826			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
   1827			if (r == RESUME_HOST) {
   1828				vcpu->mmio_needed = 1;
   1829				goto out;
   1830			}
   1831		}
   1832#endif
   1833#ifdef CONFIG_ALTIVEC
   1834		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
   1835			vcpu->arch.mmio_vmx_copy_nums--;
   1836			vcpu->arch.mmio_vmx_offset++;
   1837		}
   1838
   1839		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
   1840			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
   1841			if (r == RESUME_HOST) {
   1842				vcpu->mmio_needed = 1;
   1843				goto out;
   1844			}
   1845		}
   1846#endif
   1847	} else if (vcpu->arch.osi_needed) {
   1848		u64 *gprs = run->osi.gprs;
   1849		int i;
   1850
   1851		for (i = 0; i < 32; i++)
   1852			kvmppc_set_gpr(vcpu, i, gprs[i]);
   1853		vcpu->arch.osi_needed = 0;
   1854	} else if (vcpu->arch.hcall_needed) {
   1855		int i;
   1856
   1857		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
   1858		for (i = 0; i < 9; ++i)
   1859			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
   1860		vcpu->arch.hcall_needed = 0;
   1861#ifdef CONFIG_BOOKE
   1862	} else if (vcpu->arch.epr_needed) {
   1863		kvmppc_set_epr(vcpu, run->epr.epr);
   1864		vcpu->arch.epr_needed = 0;
   1865#endif
   1866	}
   1867
   1868	kvm_sigset_activate(vcpu);
   1869
   1870	if (run->immediate_exit)
   1871		r = -EINTR;
   1872	else
   1873		r = kvmppc_vcpu_run(vcpu);
   1874
   1875	kvm_sigset_deactivate(vcpu);
   1876
   1877#ifdef CONFIG_ALTIVEC
   1878out:
   1879#endif
   1880
   1881	/*
   1882	 * We're already returning to userspace, don't pass the
   1883	 * RESUME_HOST flags along.
   1884	 */
   1885	if (r > 0)
   1886		r = 0;
   1887
   1888	vcpu_put(vcpu);
   1889	return r;
   1890}
   1891
   1892int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
   1893{
   1894	if (irq->irq == KVM_INTERRUPT_UNSET) {
   1895		kvmppc_core_dequeue_external(vcpu);
   1896		return 0;
   1897	}
   1898
   1899	kvmppc_core_queue_external(vcpu, irq);
   1900
   1901	kvm_vcpu_kick(vcpu);
   1902
   1903	return 0;
   1904}
   1905
   1906static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
   1907				     struct kvm_enable_cap *cap)
   1908{
   1909	int r;
   1910
   1911	if (cap->flags)
   1912		return -EINVAL;
   1913
   1914	switch (cap->cap) {
   1915	case KVM_CAP_PPC_OSI:
   1916		r = 0;
   1917		vcpu->arch.osi_enabled = true;
   1918		break;
   1919	case KVM_CAP_PPC_PAPR:
   1920		r = 0;
   1921		vcpu->arch.papr_enabled = true;
   1922		break;
   1923	case KVM_CAP_PPC_EPR:
   1924		r = 0;
   1925		if (cap->args[0])
   1926			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
   1927		else
   1928			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
   1929		break;
   1930#ifdef CONFIG_BOOKE
   1931	case KVM_CAP_PPC_BOOKE_WATCHDOG:
   1932		r = 0;
   1933		vcpu->arch.watchdog_enabled = true;
   1934		break;
   1935#endif
   1936#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
   1937	case KVM_CAP_SW_TLB: {
   1938		struct kvm_config_tlb cfg;
   1939		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
   1940
   1941		r = -EFAULT;
   1942		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
   1943			break;
   1944
   1945		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
   1946		break;
   1947	}
   1948#endif
   1949#ifdef CONFIG_KVM_MPIC
   1950	case KVM_CAP_IRQ_MPIC: {
   1951		struct fd f;
   1952		struct kvm_device *dev;
   1953
   1954		r = -EBADF;
   1955		f = fdget(cap->args[0]);
   1956		if (!f.file)
   1957			break;
   1958
   1959		r = -EPERM;
   1960		dev = kvm_device_from_filp(f.file);
   1961		if (dev)
   1962			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
   1963
   1964		fdput(f);
   1965		break;
   1966	}
   1967#endif
   1968#ifdef CONFIG_KVM_XICS
   1969	case KVM_CAP_IRQ_XICS: {
   1970		struct fd f;
   1971		struct kvm_device *dev;
   1972
   1973		r = -EBADF;
   1974		f = fdget(cap->args[0]);
   1975		if (!f.file)
   1976			break;
   1977
   1978		r = -EPERM;
   1979		dev = kvm_device_from_filp(f.file);
   1980		if (dev) {
   1981			if (xics_on_xive())
   1982				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
   1983			else
   1984				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
   1985		}
   1986
   1987		fdput(f);
   1988		break;
   1989	}
   1990#endif /* CONFIG_KVM_XICS */
   1991#ifdef CONFIG_KVM_XIVE
   1992	case KVM_CAP_PPC_IRQ_XIVE: {
   1993		struct fd f;
   1994		struct kvm_device *dev;
   1995
   1996		r = -EBADF;
   1997		f = fdget(cap->args[0]);
   1998		if (!f.file)
   1999			break;
   2000
   2001		r = -ENXIO;
   2002		if (!xive_enabled())
   2003			break;
   2004
   2005		r = -EPERM;
   2006		dev = kvm_device_from_filp(f.file);
   2007		if (dev)
   2008			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
   2009							    cap->args[1]);
   2010
   2011		fdput(f);
   2012		break;
   2013	}
   2014#endif /* CONFIG_KVM_XIVE */
   2015#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
   2016	case KVM_CAP_PPC_FWNMI:
   2017		r = -EINVAL;
   2018		if (!is_kvmppc_hv_enabled(vcpu->kvm))
   2019			break;
   2020		r = 0;
   2021		vcpu->kvm->arch.fwnmi_enabled = true;
   2022		break;
   2023#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
   2024	default:
   2025		r = -EINVAL;
   2026		break;
   2027	}
   2028
   2029	if (!r)
   2030		r = kvmppc_sanity_check(vcpu);
   2031
   2032	return r;
   2033}
   2034
   2035bool kvm_arch_intc_initialized(struct kvm *kvm)
   2036{
   2037#ifdef CONFIG_KVM_MPIC
   2038	if (kvm->arch.mpic)
   2039		return true;
   2040#endif
   2041#ifdef CONFIG_KVM_XICS
   2042	if (kvm->arch.xics || kvm->arch.xive)
   2043		return true;
   2044#endif
   2045	return false;
   2046}
   2047
   2048int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
   2049                                    struct kvm_mp_state *mp_state)
   2050{
   2051	return -EINVAL;
   2052}
   2053
   2054int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
   2055                                    struct kvm_mp_state *mp_state)
   2056{
   2057	return -EINVAL;
   2058}
   2059
   2060long kvm_arch_vcpu_async_ioctl(struct file *filp,
   2061			       unsigned int ioctl, unsigned long arg)
   2062{
   2063	struct kvm_vcpu *vcpu = filp->private_data;
   2064	void __user *argp = (void __user *)arg;
   2065
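	/*
	 * KVM_INTERRUPT is handled here, on the async path, so userspace can
	 * inject an interrupt without waiting for the vcpu mutex held by a
	 * concurrently running KVM_RUN.
	 */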
   2066	if (ioctl == KVM_INTERRUPT) {
   2067		struct kvm_interrupt irq;
   2068		if (copy_from_user(&irq, argp, sizeof(irq)))
   2069			return -EFAULT;
   2070		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
   2071	}
   2072	return -ENOIOCTLCMD;
   2073}
   2074
   2075long kvm_arch_vcpu_ioctl(struct file *filp,
   2076                         unsigned int ioctl, unsigned long arg)
   2077{
   2078	struct kvm_vcpu *vcpu = filp->private_data;
   2079	void __user *argp = (void __user *)arg;
   2080	long r;
   2081
   2082	switch (ioctl) {
   2083	case KVM_ENABLE_CAP:
   2084	{
   2085		struct kvm_enable_cap cap;
   2086		r = -EFAULT;
   2087		if (copy_from_user(&cap, argp, sizeof(cap)))
   2088			goto out;
   2089		vcpu_load(vcpu);
   2090		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
   2091		vcpu_put(vcpu);
   2092		break;
   2093	}
   2094
   2095	case KVM_SET_ONE_REG:
   2096	case KVM_GET_ONE_REG:
   2097	{
   2098		struct kvm_one_reg reg;
   2099		r = -EFAULT;
   2100		if (copy_from_user(&reg, argp, sizeof(reg)))
   2101			goto out;
   2102		if (ioctl == KVM_SET_ONE_REG)
   2103			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
   2104		else
   2105			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
   2106		break;
   2107	}
   2108
   2109#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
   2110	case KVM_DIRTY_TLB: {
   2111		struct kvm_dirty_tlb dirty;
   2112		r = -EFAULT;
   2113		if (copy_from_user(&dirty, argp, sizeof(dirty)))
   2114			goto out;
   2115		vcpu_load(vcpu);
   2116		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
   2117		vcpu_put(vcpu);
   2118		break;
   2119	}
   2120#endif
   2121	default:
   2122		r = -EINVAL;
   2123	}
   2124
   2125out:
   2126	return r;
   2127}
   2128
   2129vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
   2130{
   2131	return VM_FAULT_SIGBUS;
   2132}
   2133
   2134static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
   2135{
   2136	u32 inst_nop = 0x60000000;
   2137#ifdef CONFIG_KVM_BOOKE_HV
   2138	u32 inst_sc1 = 0x44000022;
   2139	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
   2140	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
   2141	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
   2142	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
   2143#else
   2144	u32 inst_lis = 0x3c000000;
   2145	u32 inst_ori = 0x60000000;
   2146	u32 inst_sc = 0x44000002;
   2147	u32 inst_imm_mask = 0xffff;
   2148
   2149	/*
   2150	 * The hypercall to get into KVM from within guest context is as
   2151	 * follows:
   2152	 *
    2153	 *    lis r0, KVM_SC_MAGIC_R0@h
    2154	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
   2155	 *    sc
   2156	 *    nop
   2157	 */
   2158	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
   2159	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
   2160	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
   2161	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
   2162#endif
   2163
   2164	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
   2165
   2166	return 0;
   2167}
   2168
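/*
 * Userspace-side sketch (illustrative only; vm_fd and hcall_insns are
 * placeholders): a VMM fetches this template through the KVM_PPC_GET_PVINFO
 * vm ioctl and exposes the four instruction words to the guest (typically
 * via the hypervisor node's "hcall-instructions" device tree property), so
 * guests need not know whether the host uses the BOOKE-HV or the Book3S
 * sequence:
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) == 0)
 *		memcpy(hcall_insns, pvinfo.hcall, sizeof(pvinfo.hcall));
 */
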
   2169int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
   2170			  bool line_status)
   2171{
   2172	if (!irqchip_in_kernel(kvm))
   2173		return -ENXIO;
   2174
   2175	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
   2176					irq_event->irq, irq_event->level,
   2177					line_status);
   2178	return 0;
   2179}
   2180
   2181
   2182int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
   2183			    struct kvm_enable_cap *cap)
   2184{
   2185	int r;
   2186
   2187	if (cap->flags)
   2188		return -EINVAL;
   2189
   2190	switch (cap->cap) {
   2191#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
   2192	case KVM_CAP_PPC_ENABLE_HCALL: {
   2193		unsigned long hcall = cap->args[0];
   2194
   2195		r = -EINVAL;
   2196		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
   2197		    cap->args[1] > 1)
   2198			break;
   2199		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
   2200			break;
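		/* hcall numbers are multiples of 4, so bit hcall / 4 tracks each one */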
   2201		if (cap->args[1])
   2202			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
   2203		else
   2204			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
   2205		r = 0;
   2206		break;
   2207	}
   2208	case KVM_CAP_PPC_SMT: {
   2209		unsigned long mode = cap->args[0];
   2210		unsigned long flags = cap->args[1];
   2211
   2212		r = -EINVAL;
   2213		if (kvm->arch.kvm_ops->set_smt_mode)
   2214			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
   2215		break;
   2216	}
   2217
   2218	case KVM_CAP_PPC_NESTED_HV:
   2219		r = -EINVAL;
   2220		if (!is_kvmppc_hv_enabled(kvm) ||
   2221		    !kvm->arch.kvm_ops->enable_nested)
   2222			break;
   2223		r = kvm->arch.kvm_ops->enable_nested(kvm);
   2224		break;
   2225#endif
   2226#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
   2227	case KVM_CAP_PPC_SECURE_GUEST:
   2228		r = -EINVAL;
   2229		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
   2230			break;
   2231		r = kvm->arch.kvm_ops->enable_svm(kvm);
   2232		break;
   2233	case KVM_CAP_PPC_DAWR1:
   2234		r = -EINVAL;
   2235		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
   2236			break;
   2237		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
   2238		break;
   2239#endif
   2240	default:
   2241		r = -EINVAL;
   2242		break;
   2243	}
   2244
   2245	return r;
   2246}
   2247
   2248#ifdef CONFIG_PPC_BOOK3S_64
   2249/*
   2250 * These functions check whether the underlying hardware is safe
   2251 * against attacks based on observing the effects of speculatively
   2252 * executed instructions, and whether it supplies instructions for
   2253 * use in workarounds.  The information comes from firmware, either
   2254 * via the device tree on powernv platforms or from an hcall on
   2255 * pseries platforms.
   2256 */
   2257#ifdef CONFIG_PPC_PSERIES
   2258static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
   2259{
   2260	struct h_cpu_char_result c;
   2261	unsigned long rc;
   2262
   2263	if (!machine_is(pseries))
   2264		return -ENOTTY;
   2265
   2266	rc = plpar_get_cpu_characteristics(&c);
   2267	if (rc == H_SUCCESS) {
   2268		cp->character = c.character;
   2269		cp->behaviour = c.behaviour;
   2270		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
   2271			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
   2272			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
   2273			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
   2274			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
   2275			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
   2276			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
   2277			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
   2278			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
   2279		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
   2280			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
   2281			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
   2282			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
   2283	}
   2284	return 0;
   2285}
   2286#else
   2287static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
   2288{
   2289	return -ENOTTY;
   2290}
   2291#endif
   2292
   2293static inline bool have_fw_feat(struct device_node *fw_features,
   2294				const char *state, const char *name)
   2295{
   2296	struct device_node *np;
   2297	bool r = false;
   2298
   2299	np = of_get_child_by_name(fw_features, name);
   2300	if (np) {
   2301		r = of_property_read_bool(np, state);
   2302		of_node_put(np);
   2303	}
   2304	return r;
   2305}
   2306
   2307static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
   2308{
   2309	struct device_node *np, *fw_features;
   2310	int r;
   2311
   2312	memset(cp, 0, sizeof(*cp));
   2313	r = pseries_get_cpu_char(cp);
   2314	if (r != -ENOTTY)
   2315		return r;
   2316
   2317	np = of_find_node_by_name(NULL, "ibm,opal");
   2318	if (np) {
   2319		fw_features = of_get_child_by_name(np, "fw-features");
   2320		of_node_put(np);
   2321		if (!fw_features)
   2322			return 0;
   2323		if (have_fw_feat(fw_features, "enabled",
   2324				 "inst-spec-barrier-ori31,31,0"))
   2325			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
   2326		if (have_fw_feat(fw_features, "enabled",
   2327				 "fw-bcctrl-serialized"))
   2328			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
   2329		if (have_fw_feat(fw_features, "enabled",
   2330				 "inst-l1d-flush-ori30,30,0"))
   2331			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
   2332		if (have_fw_feat(fw_features, "enabled",
   2333				 "inst-l1d-flush-trig2"))
   2334			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
   2335		if (have_fw_feat(fw_features, "enabled",
   2336				 "fw-l1d-thread-split"))
   2337			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
   2338		if (have_fw_feat(fw_features, "enabled",
   2339				 "fw-count-cache-disabled"))
   2340			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
   2341		if (have_fw_feat(fw_features, "enabled",
   2342				 "fw-count-cache-flush-bcctr2,0,0"))
   2343			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
   2344		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
   2345			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
   2346			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
   2347			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
   2348			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
   2349			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
   2350			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
   2351
   2352		if (have_fw_feat(fw_features, "enabled",
   2353				 "speculation-policy-favor-security"))
   2354			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
   2355		if (!have_fw_feat(fw_features, "disabled",
   2356				  "needs-l1d-flush-msr-pr-0-to-1"))
   2357			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
   2358		if (!have_fw_feat(fw_features, "disabled",
   2359				  "needs-spec-barrier-for-bound-checks"))
   2360			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
   2361		if (have_fw_feat(fw_features, "enabled",
   2362				 "needs-count-cache-flush-on-context-switch"))
   2363			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
   2364		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
   2365			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
   2366			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
   2367			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
   2368
   2369		of_node_put(fw_features);
   2370	}
   2371
   2372	return 0;
   2373}
   2374#endif
   2375
   2376long kvm_arch_vm_ioctl(struct file *filp,
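/*
 * Userspace-side sketch (illustrative only; vm_fd is a placeholder): a VMM
 * queries the collected characteristics once through the
 * KVM_PPC_GET_CPU_CHAR vm ioctl dispatched below and advertises the relevant
 * bits to its guest; only bits that are also set in character_mask /
 * behaviour_mask carry information:
 *
 *	struct kvm_ppc_cpu_char cc;
 *	int need_l1d_flush = 0;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0)
 *		need_l1d_flush = !!(cc.behaviour & cc.behaviour_mask &
 *				    KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR);
 */
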
   2377                       unsigned int ioctl, unsigned long arg)
   2378{
   2379	struct kvm *kvm __maybe_unused = filp->private_data;
   2380	void __user *argp = (void __user *)arg;
   2381	long r;
   2382
   2383	switch (ioctl) {
   2384	case KVM_PPC_GET_PVINFO: {
   2385		struct kvm_ppc_pvinfo pvinfo;
   2386		memset(&pvinfo, 0, sizeof(pvinfo));
   2387		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
   2388		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
   2389			r = -EFAULT;
   2390			goto out;
   2391		}
   2392
   2393		break;
   2394	}
   2395#ifdef CONFIG_SPAPR_TCE_IOMMU
   2396	case KVM_CREATE_SPAPR_TCE_64: {
   2397		struct kvm_create_spapr_tce_64 create_tce_64;
   2398
   2399		r = -EFAULT;
   2400		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
   2401			goto out;
   2402		if (create_tce_64.flags) {
   2403			r = -EINVAL;
   2404			goto out;
   2405		}
   2406		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
   2407		goto out;
   2408	}
   2409	case KVM_CREATE_SPAPR_TCE: {
   2410		struct kvm_create_spapr_tce create_tce;
   2411		struct kvm_create_spapr_tce_64 create_tce_64;
   2412
   2413		r = -EFAULT;
   2414		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
   2415			goto out;
   2416
   2417		create_tce_64.liobn = create_tce.liobn;
   2418		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
   2419		create_tce_64.offset = 0;
   2420		create_tce_64.size = create_tce.window_size >>
   2421				IOMMU_PAGE_SHIFT_4K;
   2422		create_tce_64.flags = 0;
   2423		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
   2424		goto out;
   2425	}
   2426#endif
   2427#ifdef CONFIG_PPC_BOOK3S_64
   2428	case KVM_PPC_GET_SMMU_INFO: {
   2429		struct kvm_ppc_smmu_info info;
   2430		struct kvm *kvm = filp->private_data;
   2431
   2432		memset(&info, 0, sizeof(info));
   2433		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
   2434		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
   2435			r = -EFAULT;
   2436		break;
   2437	}
   2438	case KVM_PPC_RTAS_DEFINE_TOKEN: {
   2439		struct kvm *kvm = filp->private_data;
   2440
   2441		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
   2442		break;
   2443	}
   2444	case KVM_PPC_CONFIGURE_V3_MMU: {
   2445		struct kvm *kvm = filp->private_data;
   2446		struct kvm_ppc_mmuv3_cfg cfg;
   2447
   2448		r = -EINVAL;
   2449		if (!kvm->arch.kvm_ops->configure_mmu)
   2450			goto out;
   2451		r = -EFAULT;
   2452		if (copy_from_user(&cfg, argp, sizeof(cfg)))
   2453			goto out;
   2454		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
   2455		break;
   2456	}
   2457	case KVM_PPC_GET_RMMU_INFO: {
   2458		struct kvm *kvm = filp->private_data;
   2459		struct kvm_ppc_rmmu_info info;
   2460
   2461		r = -EINVAL;
   2462		if (!kvm->arch.kvm_ops->get_rmmu_info)
   2463			goto out;
   2464		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
   2465		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
   2466			r = -EFAULT;
   2467		break;
   2468	}
   2469	case KVM_PPC_GET_CPU_CHAR: {
   2470		struct kvm_ppc_cpu_char cpuchar;
   2471
   2472		r = kvmppc_get_cpu_char(&cpuchar);
   2473		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
   2474			r = -EFAULT;
   2475		break;
   2476	}
   2477	case KVM_PPC_SVM_OFF: {
   2478		struct kvm *kvm = filp->private_data;
   2479
   2480		r = 0;
   2481		if (!kvm->arch.kvm_ops->svm_off)
   2482			goto out;
   2483
   2484		r = kvm->arch.kvm_ops->svm_off(kvm);
   2485		break;
   2486	}
   2487	default: {
   2488		struct kvm *kvm = filp->private_data;
   2489		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
   2490	}
   2491#else /* CONFIG_PPC_BOOK3S_64 */
   2492	default:
   2493		r = -ENOTTY;
   2494#endif
   2495	}
   2496out:
   2497	return r;
   2498}
   2499
   2500static DEFINE_IDA(lpid_inuse);
   2501static unsigned long nr_lpids;
   2502
   2503long kvmppc_alloc_lpid(void)
   2504{
   2505	int lpid;
   2506
   2507	/* The host LPID must always be 0 (allocation starts at 1) */
   2508	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
   2509	if (lpid < 0) {
   2510		if (lpid == -ENOMEM)
   2511			pr_err("%s: Out of memory\n", __func__);
   2512		else
   2513			pr_err("%s: No LPIDs free\n", __func__);
   2514		return -ENOMEM;
   2515	}
   2516
   2517	return lpid;
   2518}
   2519EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
   2520
   2521void kvmppc_free_lpid(long lpid)
   2522{
   2523	ida_free(&lpid_inuse, lpid);
   2524}
   2525EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
   2526
   2527/* nr_lpids_param includes the host LPID */
   2528void kvmppc_init_lpid(unsigned long nr_lpids_param)
   2529{
   2530	nr_lpids = nr_lpids_param;
   2531}
   2532EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
   2533
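/*
 * Usage sketch (illustrative; nr_hw_lpids is a placeholder, the real limit
 * is backend specific): a backend announces how many LPID values the
 * hardware supports, then each guest takes one, with LPID 0 reserved for
 * the host:
 *
 *	kvmppc_init_lpid(nr_hw_lpids);		// once, at backend init
 *
 *	long lpid = kvmppc_alloc_lpid();	// per guest, always >= 1
 *	if (lpid < 0)
 *		return lpid;
 *	...
 *	kvmppc_free_lpid(lpid);			// when the guest goes away
 */
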
   2534int kvm_arch_init(void *opaque)
   2535{
   2536	return 0;
   2537}
   2538
   2539EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
   2540
   2541void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
   2542{
   2543	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
   2544		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
   2545}
   2546
   2547int kvm_arch_create_vm_debugfs(struct kvm *kvm)
   2548{
   2549	if (kvm->arch.kvm_ops->create_vm_debugfs)
   2550		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
   2551	return 0;
   2552}