cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

book3s_hv_uvmem.c (33978B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Secure pages management: Migration of pages between normal and secure
      4 * memory of KVM guests.
      5 *
      6 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
      7 */
      8
      9/*
     10 * A pseries guest can be run as secure guest on Ultravisor-enabled
     11 * POWER platforms. On such platforms, this driver will be used to manage
     12 * the movement of guest pages between the normal memory managed by
     13 * hypervisor (HV) and secure memory managed by Ultravisor (UV).
     14 *
     15 * The page-in or page-out requests from UV will come to HV as hcalls and
     16 * HV will call back into UV via ultracalls to satisfy these page requests.
     17 *
     18 * Private ZONE_DEVICE memory equal to the amount of secure memory
     19 * available in the platform for running secure guests is hotplugged.
     20 * Whenever a page belonging to the guest becomes secure, a page from this
     21 * private device memory is used to represent and track that secure page
     22 * on the HV side. Some pages (like virtio buffers, VPA pages etc) are
     23 * shared between UV and HV. However such pages aren't represented by
     24 * device private memory and mappings to shared memory exist in both
     25 * UV and HV page tables.
     26 */
     27
     28/*
     29 * Notes on locking
     30 *
     31 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
     32 * page-in and page-out requests for the same GPA. Concurrent accesses
     33 * can either come via UV (guest vCPUs requesting the same page)
     34 * or when HV and guest simultaneously access the same page.
     35 * This mutex serializes the migration of a page from HV (normal) to
     36 * UV (secure) and vice versa. So the serialization points are around
     37 * migrate_vma routines and page-in/out routines.
     38 *
     39 * The per-guest mutex comes with a cost though. Mainly it serializes the
     40 * fault path as page-out can occur when HV faults on accessing secure
     41 * guest pages. Currently UV issues page-in requests for all the guest
     42 * PFNs one at a time during early boot (UV_ESM uvcall), so this is
     43 * not a cause for concern. Also currently the number of page-outs caused
     44 * by HV touching secure pages is very low. If and when UV supports
     45 * overcommitting, then we might see concurrent guest-driven page-outs.
     46 *
     47 * Locking order
     48 *
     49 * 1. kvm->srcu - Protects KVM memslots
     50 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
     51 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
     52 *			     as sync-points for page-in/out
     53 */
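
/*
 * A minimal sketch (for illustration only, hence compiled out) of the locking
 * order listed above, as it appears in the hcall paths further down in this
 * file (e.g. kvmppc_h_svm_page_in()). The helper name is made up for the
 * example.
 */
#if 0
static void example_locking_order(struct kvm *kvm)
{
	int srcu_idx;

	/* 1. kvm->srcu protects the memslots while we translate GFN to HVA */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	/* 2. mmap_lock protects find_vma_intersection()/migrate_vma_*() */
	mmap_read_lock(kvm->mm);
	/* 3. uvmem_lock serializes page-in/page-out of a given GPA */
	mutex_lock(&kvm->arch.uvmem_lock);

	/* ... page-in or page-out work happens here ... */

	mutex_unlock(&kvm->arch.uvmem_lock);
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
#endif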
     54
     55/*
     56 * Notes on page size
     57 *
     58 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
     59 * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE(64K) granularity. HV tracks
     60 * secure GPAs at 64K page size and maintains one device PFN for each
     61 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
     62 * for one 64K page at a time.
     63 *
     64 * HV faulting on secure pages: When HV touches any secure page, it
     65 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
     66 * UV splits and remaps the 2MB page if necessary and copies out the
     67 * required 64K page contents.
     68 *
     69 * Shared pages: Whenever the guest shares a secure page, UV will split and
     70 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
     71 *
     72 * HV invalidating a page: When a regular page belonging to a secure
     73 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
     74 * page size. Using 64K page size is correct here because any non-secure
     75 * page will essentially be of 64K page size. Splitting by UV during sharing
     76 * and page-out ensures this.
     77 *
     78 * Page fault handling: When HV handles a page fault of a page belonging
     79 * to a secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
     80 * Using 64K size is correct here too as UV would have split the 2MB page
     81 * into 64K mappings and would have done page-outs earlier.
     82 *
     83 * In summary, the current secure pages handling code in HV assumes
     84 * 64K page size and in fact fails any page-in/page-out requests of
     85 * non-64K size upfront. If and when UV starts supporting multiple
     86 * page-sizes, we need to break this assumption.
     87 */
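
/*
 * A minimal sketch (illustration only, compiled out) of the 64K-only policy
 * described above: the hcall handlers below (kvmppc_h_svm_page_in() and
 * kvmppc_h_svm_page_out()) reject any request whose page_shift is not
 * PAGE_SHIFT. The helper name is made up for the example.
 */
#if 0
static unsigned long example_check_page_shift(unsigned long page_shift)
{
	/* Only 64K (PAGE_SIZE) page-in/page-out requests are accepted */
	if (page_shift != PAGE_SHIFT)
		return H_P3;
	return H_SUCCESS;
}
#endif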
     88
     89#include <linux/pagemap.h>
     90#include <linux/migrate.h>
     91#include <linux/kvm_host.h>
     92#include <linux/ksm.h>
     93#include <linux/of.h>
     94#include <linux/memremap.h>
     95#include <asm/ultravisor.h>
     96#include <asm/mman.h>
     97#include <asm/kvm_ppc.h>
     98#include <asm/kvm_book3s_uvmem.h>
     99
    100static struct dev_pagemap kvmppc_uvmem_pgmap;
    101static unsigned long *kvmppc_uvmem_bitmap;
    102static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
    103
    104/*
    105 * States of a GFN
    106 * ---------------
    107 * The GFN can be in one of the following states.
    108 *
    109 * (a) Secure - The GFN is secure. The GFN is associated with
    110 *	a Secure VM; the contents of the GFN are not accessible
    111 *	to the Hypervisor.  This GFN can be backed by a secure-PFN,
    112 *	or can be backed by a normal-PFN with contents encrypted.
    113 *	The former is true when the GFN is paged-in into the
    114 *	ultravisor. The latter is true when the GFN is paged-out
    115 *	of the ultravisor.
    116 *
    117 * (b) Shared - The GFN is shared. The GFN is associated with
    118 *	a secure VM. The contents of the GFN are accessible to
    119 *	the Hypervisor. This GFN is backed by a normal-PFN and its
    120 *	content is unencrypted.
    121 *
    122 * (c) Normal - The GFN is normal. The GFN is associated with
    123 *	a normal VM. The contents of the GFN are accessible to
    124 *	the Hypervisor. Its content is never encrypted.
    125 *
    126 * States of a VM.
    127 * ---------------
    128 *
    129 * Normal VM:  A VM whose contents are always accessible to
    130 *	the hypervisor.  All its GFNs are normal-GFNs.
    131 *
    132 * Secure VM: A VM whose contents are not accessible to the
    133 *	hypervisor without the VM's consent.  Its GFNs are
    134 *	either Shared-GFN or Secure-GFNs.
    135 *
    136 * Transient VM: A Normal VM that is transitioning to a secure VM.
    137 *	The transition starts on successful return of
    138 *	H_SVM_INIT_START, and ends on successful return
    139 *	of H_SVM_INIT_DONE. This transient VM can have GFNs
    140 *	in any of the three states; i.e. Secure-GFN, Shared-GFN,
    141 *	and Normal-GFN.	The VM never executes in this state
    142 *	in supervisor-mode.
    143 *
    144 * Memory slot State.
    145 * -----------------------------
    146 *	The state of a memory slot mirrors the state of the
    147 *	VM the memory slot is associated with.
    148 *
    149 * VM State transition.
    150 * --------------------
    151 *
    152 *  A VM always starts in Normal Mode.
    153 *
    154 *  H_SVM_INIT_START moves the VM into transient state. During this
    155 *  time the Ultravisor may request some of its GFNs to be shared or
    156 *  secured. So its GFNs can be in one of the three GFN states.
    157 *
    158 *  H_SVM_INIT_DONE moves the VM entirely from transient state to
    159 *  secure-state. At this point any left-over normal-GFNs are
    160 *  transitioned to Secure-GFN.
    161 *
    162 *  H_SVM_INIT_ABORT moves the transient VM back to normal VM.
    163 *  All its GFNs are moved to Normal-GFNs.
    164 *
    165 *  UV_TERMINATE transitions the secure-VM back to normal-VM. All
    166 *  the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
    167 *  Note: The contents of the normal-GFN are undefined at this point.
    168 *
    169 * GFN state implementation:
    170 * -------------------------
    171 *
    172 * A Secure GFN is associated with a secure-PFN (also called uvmem_pfn)
    173 * when the GFN is paged-in. Its pfns[] entry has the KVMPPC_GFN_UVMEM_PFN
    174 * flag set, and contains the value of the secure-PFN.
    175 * It is associated with a normal-PFN (also called mem_pfn) when
    176 * the GFN is paged-out. Its pfns[] entry has the KVMPPC_GFN_MEM_PFN flag set.
    177 * The value of the normal-PFN is not tracked.
    178 *
    179 * A Shared GFN is associated with a normal-PFN. Its pfns[] entry has the
    180 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
    181 * is not tracked.
    182 *
    183 * A Normal GFN is associated with a normal-PFN. Its pfns[] entry has
    184 * no flag set. The value of the normal-PFN is not tracked.
    185 *
    186 * Life cycle of a GFN
    187 * --------------------
    188 *
    189 * --------------------------------------------------------------
    190 * |        |     Share  |  Unshare | SVM       |H_SVM_INIT_DONE|
    191 * |        |operation   |operation | abort/    |               |
    192 * |        |            |          | terminate |               |
    193 * --------------------------------------------------------------
    194 * |        |            |          |           |               |
    195 * | Secure |     Shared | Secure   |Normal     |Secure         |
    196 * |        |            |          |           |               |
    197 * | Shared |     Shared | Secure   |Normal     |Shared         |
    198 * |        |            |          |           |               |
    199 * | Normal |     Shared | Secure   |Normal     |Secure         |
    200 * --------------------------------------------------------------
    201 *
    202 * Life cycle of a VM
    203 * --------------------
    204 *
    205 * --------------------------------------------------------------------
    206 * |         |  start    |  H_SVM_  |H_SVM_   |H_SVM_     |UV_SVM_    |
    207 * |         |  VM       |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE  |
    208 * |         |           |          |         |           |           |
    209 * --------------------------------------------------------------------
    210 * |         |           |          |         |           |           |
    211 * | Normal  | Normal    | Transient|Error    |Error      |Normal     |
    212 * |         |           |          |         |           |           |
    213 * | Secure  |   Error   | Error    |Error    |Error      |Normal     |
    214 * |         |           |          |         |           |           |
    215 * |Transient|   N/A     | Error    |Secure   |Normal     |Normal     |
    216 * --------------------------------------------------------------------
    217 */
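
/*
 * A minimal sketch (illustration only, compiled out) of the VM life cycle
 * table above expressed as code. The enum and helpers are made up for the
 * example; the real state is tracked in kvm->arch.secure_guest via the
 * KVMPPC_SECURE_INIT_* bits.
 */
#if 0
enum example_vm_state { EX_NORMAL, EX_TRANSIENT, EX_SECURE, EX_ERROR };

static enum example_vm_state example_h_svm_init_start(enum example_vm_state s)
{
	/* Only a Normal VM may begin the transition to secure */
	return (s == EX_NORMAL) ? EX_TRANSIENT : EX_ERROR;
}

static enum example_vm_state example_h_svm_init_done(enum example_vm_state s)
{
	/* Only a Transient VM may complete the transition */
	return (s == EX_TRANSIENT) ? EX_SECURE : EX_ERROR;
}

static enum example_vm_state example_uv_svm_terminate(enum example_vm_state s)
{
	/* UV_SVM_TERMINATE always lands the VM back in the Normal state */
	return EX_NORMAL;
}
#endif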
    218
    219#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)
    220#define KVMPPC_GFN_MEM_PFN	(1UL << 62)
    221#define KVMPPC_GFN_SHARED	(1UL << 61)
    222#define KVMPPC_GFN_SECURE	(KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
    223#define KVMPPC_GFN_FLAG_MASK	(KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
    224#define KVMPPC_GFN_PFN_MASK	(~KVMPPC_GFN_FLAG_MASK)
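
/*
 * A minimal sketch (illustration only, compiled out) of how a pfns[] entry
 * in struct kvmppc_uvmem_slot below encodes the GFN states described above:
 * the top bits hold the state flags and, for a paged-in secure GFN, the low
 * bits hold the backing device (uvmem) PFN. The helper name is made up for
 * the example.
 */
#if 0
static const char *example_gfn_state(unsigned long entry,
				     unsigned long *uvmem_pfn)
{
	if (entry & KVMPPC_GFN_UVMEM_PFN) {
		/* Secure and paged-in: low bits carry the device PFN */
		*uvmem_pfn = entry & KVMPPC_GFN_PFN_MASK;
		return "secure (uvmem PFN)";
	}
	if (entry & KVMPPC_GFN_MEM_PFN)
		return "secure (paged out, normal PFN not tracked)";
	if (entry & KVMPPC_GFN_SHARED)
		return "shared";
	return "normal";
}
#endif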
    225
    226struct kvmppc_uvmem_slot {
    227	struct list_head list;
    228	unsigned long nr_pfns;
    229	unsigned long base_pfn;
    230	unsigned long *pfns;
    231};
    232struct kvmppc_uvmem_page_pvt {
    233	struct kvm *kvm;
    234	unsigned long gpa;
    235	bool skip_page_out;
    236	bool remove_gfn;
    237};
    238
    239bool kvmppc_uvmem_available(void)
    240{
    241	/*
    242	 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
    243	 * and our data structures have been initialized successfully.
    244	 */
    245	return !!kvmppc_uvmem_bitmap;
    246}
    247
    248int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
    249{
    250	struct kvmppc_uvmem_slot *p;
    251
    252	p = kzalloc(sizeof(*p), GFP_KERNEL);
    253	if (!p)
    254		return -ENOMEM;
    255	p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
    256	if (!p->pfns) {
    257		kfree(p);
    258		return -ENOMEM;
    259	}
    260	p->nr_pfns = slot->npages;
    261	p->base_pfn = slot->base_gfn;
    262
    263	mutex_lock(&kvm->arch.uvmem_lock);
    264	list_add(&p->list, &kvm->arch.uvmem_pfns);
    265	mutex_unlock(&kvm->arch.uvmem_lock);
    266
    267	return 0;
    268}
    269
    270/*
    271 * All device PFNs are already released by the time we come here.
    272 */
    273void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
    274{
    275	struct kvmppc_uvmem_slot *p, *next;
    276
    277	mutex_lock(&kvm->arch.uvmem_lock);
    278	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
    279		if (p->base_pfn == slot->base_gfn) {
    280			vfree(p->pfns);
    281			list_del(&p->list);
    282			kfree(p);
    283			break;
    284		}
    285	}
    286	mutex_unlock(&kvm->arch.uvmem_lock);
    287}
    288
    289static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
    290			unsigned long flag, unsigned long uvmem_pfn)
    291{
    292	struct kvmppc_uvmem_slot *p;
    293
    294	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
    295		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
    296			unsigned long index = gfn - p->base_pfn;
    297
    298			if (flag == KVMPPC_GFN_UVMEM_PFN)
    299				p->pfns[index] = uvmem_pfn | flag;
    300			else
    301				p->pfns[index] = flag;
    302			return;
    303		}
    304	}
    305}
    306
    307/* mark the GFN as a secure-GFN associated with the @uvmem_pfn device-PFN. */
    308static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
    309			unsigned long uvmem_pfn, struct kvm *kvm)
    310{
    311	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
    312}
    313
    314/* mark the GFN as secure-GFN associated with a memory-PFN. */
    315static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
    316{
    317	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
    318}
    319
    320/* mark the GFN as a shared GFN. */
    321static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
    322{
    323	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
    324}
    325
    326/* mark the GFN as a non-existent GFN. */
    327static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
    328{
    329	kvmppc_mark_gfn(gfn, kvm, 0, 0);
    330}
    331
    332/* return true if the GFN is a secure-GFN backed by a secure-PFN */
    333static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
    334				    unsigned long *uvmem_pfn)
    335{
    336	struct kvmppc_uvmem_slot *p;
    337
    338	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
    339		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
    340			unsigned long index = gfn - p->base_pfn;
    341
    342			if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
    343				if (uvmem_pfn)
    344					*uvmem_pfn = p->pfns[index] &
    345						     KVMPPC_GFN_PFN_MASK;
    346				return true;
    347			} else
    348				return false;
    349		}
    350	}
    351	return false;
    352}
    353
    354/*
    355 * Starting from *gfn, search for the next available GFN that is not yet
    356 * transitioned to a secure GFN. Return the value of that GFN in *gfn. If
    357 * such a GFN is found, return true, else return false.
    358 *
    359 * Must be called with kvm->arch.uvmem_lock held.
    360 */
    361static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
    362		struct kvm *kvm, unsigned long *gfn)
    363{
    364	struct kvmppc_uvmem_slot *p = NULL, *iter;
    365	bool ret = false;
    366	unsigned long i;
    367
    368	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
    369		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
    370			p = iter;
    371			break;
    372		}
    373	if (!p)
    374		return ret;
    375	/*
    376	 * The code below assumes a one-to-one correspondence between
    377	 * kvmppc_uvmem_slot and memslot.
    378	 */
    379	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
    380		unsigned long index = i - p->base_pfn;
    381
    382		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
    383			*gfn = i;
    384			ret = true;
    385			break;
    386		}
    387	}
    388	return ret;
    389}
    390
    391static int kvmppc_memslot_page_merge(struct kvm *kvm,
    392		const struct kvm_memory_slot *memslot, bool merge)
    393{
    394	unsigned long gfn = memslot->base_gfn;
    395	unsigned long end, start = gfn_to_hva(kvm, gfn);
    396	int ret = 0;
    397	struct vm_area_struct *vma;
    398	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
    399
    400	if (kvm_is_error_hva(start))
    401		return H_STATE;
    402
    403	end = start + (memslot->npages << PAGE_SHIFT);
    404
    405	mmap_write_lock(kvm->mm);
    406	do {
    407		vma = find_vma_intersection(kvm->mm, start, end);
    408		if (!vma) {
    409			ret = H_STATE;
    410			break;
    411		}
    412		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
    413			  merge_flag, &vma->vm_flags);
    414		if (ret) {
    415			ret = H_STATE;
    416			break;
    417		}
    418		start = vma->vm_end;
    419	} while (end > vma->vm_end);
    420
    421	mmap_write_unlock(kvm->mm);
    422	return ret;
    423}
    424
    425static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
    426		const struct kvm_memory_slot *memslot)
    427{
    428	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
    429	kvmppc_uvmem_slot_free(kvm, memslot);
    430	kvmppc_memslot_page_merge(kvm, memslot, true);
    431}
    432
    433static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
    434		const struct kvm_memory_slot *memslot)
    435{
    436	int ret = H_PARAMETER;
    437
    438	if (kvmppc_memslot_page_merge(kvm, memslot, false))
    439		return ret;
    440
    441	if (kvmppc_uvmem_slot_init(kvm, memslot))
    442		goto out1;
    443
    444	ret = uv_register_mem_slot(kvm->arch.lpid,
    445				   memslot->base_gfn << PAGE_SHIFT,
    446				   memslot->npages * PAGE_SIZE,
    447				   0, memslot->id);
    448	if (ret < 0) {
    449		ret = H_PARAMETER;
    450		goto out;
    451	}
    452	return 0;
    453out:
    454	kvmppc_uvmem_slot_free(kvm, memslot);
    455out1:
    456	kvmppc_memslot_page_merge(kvm, memslot, true);
    457	return ret;
    458}
    459
    460unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
    461{
    462	struct kvm_memslots *slots;
    463	struct kvm_memory_slot *memslot, *m;
    464	int ret = H_SUCCESS;
    465	int srcu_idx, bkt;
    466
    467	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;
    468
    469	if (!kvmppc_uvmem_bitmap)
    470		return H_UNSUPPORTED;
    471
    472	/* Only radix guests can be secure guests */
    473	if (!kvm_is_radix(kvm))
    474		return H_UNSUPPORTED;
    475
    476	/* NAK the transition to secure if not enabled */
    477	if (!kvm->arch.svm_enabled)
    478		return H_AUTHORITY;
    479
    480	srcu_idx = srcu_read_lock(&kvm->srcu);
    481
    482	/* register the memslot */
    483	slots = kvm_memslots(kvm);
    484	kvm_for_each_memslot(memslot, bkt, slots) {
    485		ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
    486		if (ret)
    487			break;
    488	}
    489
    490	if (ret) {
    491		slots = kvm_memslots(kvm);
    492		kvm_for_each_memslot(m, bkt, slots) {
    493			if (m == memslot)
    494				break;
    495			__kvmppc_uvmem_memslot_delete(kvm, m);
    496		}
    497	}
    498
    499	srcu_read_unlock(&kvm->srcu, srcu_idx);
    500	return ret;
    501}
    502
    503/*
    504 * Provision a new page on the HV side and copy over the contents
    505 * from secure memory using the UV_PAGE_OUT uvcall.
    506 * Caller must hold kvm->arch.uvmem_lock.
    507 */
    508static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
    509		unsigned long start,
    510		unsigned long end, unsigned long page_shift,
    511		struct kvm *kvm, unsigned long gpa)
    512{
    513	unsigned long src_pfn, dst_pfn = 0;
    514	struct migrate_vma mig;
    515	struct page *dpage, *spage;
    516	struct kvmppc_uvmem_page_pvt *pvt;
    517	unsigned long pfn;
    518	int ret = U_SUCCESS;
    519
    520	memset(&mig, 0, sizeof(mig));
    521	mig.vma = vma;
    522	mig.start = start;
    523	mig.end = end;
    524	mig.src = &src_pfn;
    525	mig.dst = &dst_pfn;
    526	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
    527	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
    528
    529	/* The requested page is already paged-out, nothing to do */
    530	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
    531		return ret;
    532
    533	ret = migrate_vma_setup(&mig);
    534	if (ret)
    535		return -1;
    536
    537	spage = migrate_pfn_to_page(*mig.src);
    538	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
    539		goto out_finalize;
    540
    541	if (!is_zone_device_page(spage))
    542		goto out_finalize;
    543
    544	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
    545	if (!dpage) {
    546		ret = -1;
    547		goto out_finalize;
    548	}
    549
    550	lock_page(dpage);
    551	pvt = spage->zone_device_data;
    552	pfn = page_to_pfn(dpage);
    553
    554	/*
    555	 * This function is used in two cases:
    556	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
    557	 * - When a secure page is converted to a shared page, we *get*
    558	 *   the page to essentially unmap the device page. In this
    559	 *   case we skip page-out.
    560	 */
    561	if (!pvt->skip_page_out)
    562		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
    563				  gpa, 0, page_shift);
    564
    565	if (ret == U_SUCCESS)
    566		*mig.dst = migrate_pfn(pfn);
    567	else {
    568		unlock_page(dpage);
    569		__free_page(dpage);
    570		goto out_finalize;
    571	}
    572
    573	migrate_vma_pages(&mig);
    574
    575out_finalize:
    576	migrate_vma_finalize(&mig);
    577	return ret;
    578}
    579
    580static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
    581				      unsigned long start, unsigned long end,
    582				      unsigned long page_shift,
    583				      struct kvm *kvm, unsigned long gpa)
    584{
    585	int ret;
    586
    587	mutex_lock(&kvm->arch.uvmem_lock);
    588	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
    589	mutex_unlock(&kvm->arch.uvmem_lock);
    590
    591	return ret;
    592}
    593
    594/*
    595 * Drop device pages that we maintain for the secure guest
    596 *
    597 * We first mark the pages to be skipped from UV_PAGE_OUT when there
    598 * is an HV-side fault on these pages. Next we *get* these pages, forcing
    599 * a fault on them, and do fault-time migration to replace the device PTEs
    600 * in the QEMU page table with normal PTEs from newly allocated pages.
    601 */
    602void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
    603			     struct kvm *kvm, bool skip_page_out)
    604{
    605	int i;
    606	struct kvmppc_uvmem_page_pvt *pvt;
    607	struct page *uvmem_page;
    608	struct vm_area_struct *vma = NULL;
    609	unsigned long uvmem_pfn, gfn;
    610	unsigned long addr;
    611
    612	mmap_read_lock(kvm->mm);
    613
    614	addr = slot->userspace_addr;
    615
    616	gfn = slot->base_gfn;
    617	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {
    618
    619		/* Fetch the VMA if addr is not in the latest fetched one */
    620		if (!vma || addr >= vma->vm_end) {
    621			vma = vma_lookup(kvm->mm, addr);
    622			if (!vma) {
    623				pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
    624				break;
    625			}
    626		}
    627
    628		mutex_lock(&kvm->arch.uvmem_lock);
    629
    630		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
    631			uvmem_page = pfn_to_page(uvmem_pfn);
    632			pvt = uvmem_page->zone_device_data;
    633			pvt->skip_page_out = skip_page_out;
    634			pvt->remove_gfn = true;
    635
    636			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
    637						  PAGE_SHIFT, kvm, pvt->gpa))
    638				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
    639				       pvt->gpa, addr);
    640		} else {
    641			/* Remove the shared flag if any */
    642			kvmppc_gfn_remove(gfn, kvm);
    643		}
    644
    645		mutex_unlock(&kvm->arch.uvmem_lock);
    646	}
    647
    648	mmap_read_unlock(kvm->mm);
    649}
    650
    651unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
    652{
    653	int srcu_idx, bkt;
    654	struct kvm_memory_slot *memslot;
    655
    656	/*
    657	 * Expect to be called only after INIT_START and before INIT_DONE.
    658	 * If INIT_DONE was completed, use normal VM termination sequence.
    659	 */
    660	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
    661		return H_UNSUPPORTED;
    662
    663	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
    664		return H_STATE;
    665
    666	srcu_idx = srcu_read_lock(&kvm->srcu);
    667
    668	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
    669		kvmppc_uvmem_drop_pages(memslot, kvm, false);
    670
    671	srcu_read_unlock(&kvm->srcu, srcu_idx);
    672
    673	kvm->arch.secure_guest = 0;
    674	uv_svm_terminate(kvm->arch.lpid);
    675
    676	return H_PARAMETER;
    677}
    678
    679/*
    680 * Get a free device PFN from the pool
    681 *
    682 * Called when a normal page is moved to secure memory (UV_PAGE_IN). The
    683 * device PFN will be used to keep track of the secure page on the HV side.
    684 *
    685 * Called with kvm->arch.uvmem_lock held
    686 */
    687static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
    688{
    689	struct page *dpage = NULL;
    690	unsigned long bit, uvmem_pfn;
    691	struct kvmppc_uvmem_page_pvt *pvt;
    692	unsigned long pfn_last, pfn_first;
    693
    694	pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
    695	pfn_last = pfn_first +
    696		   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
    697
    698	spin_lock(&kvmppc_uvmem_bitmap_lock);
    699	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
    700				  pfn_last - pfn_first);
    701	if (bit >= (pfn_last - pfn_first))
    702		goto out;
    703	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
    704	spin_unlock(&kvmppc_uvmem_bitmap_lock);
    705
    706	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
    707	if (!pvt)
    708		goto out_clear;
    709
    710	uvmem_pfn = bit + pfn_first;
    711	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);
    712
    713	pvt->gpa = gpa;
    714	pvt->kvm = kvm;
    715
    716	dpage = pfn_to_page(uvmem_pfn);
    717	dpage->zone_device_data = pvt;
    718	lock_page(dpage);
    719	return dpage;
    720out_clear:
    721	spin_lock(&kvmppc_uvmem_bitmap_lock);
    722	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
    723out:
    724	spin_unlock(&kvmppc_uvmem_bitmap_lock);
    725	return NULL;
    726}
    727
    728/*
    729 * Alloc a PFN from the private device memory pool. If @pagein is true,
    730 * copy the page from normal to secure memory using the UV_PAGE_IN uvcall.
    731 */
    732static int kvmppc_svm_page_in(struct vm_area_struct *vma,
    733		unsigned long start,
    734		unsigned long end, unsigned long gpa, struct kvm *kvm,
    735		unsigned long page_shift,
    736		bool pagein)
    737{
    738	unsigned long src_pfn, dst_pfn = 0;
    739	struct migrate_vma mig;
    740	struct page *spage;
    741	unsigned long pfn;
    742	struct page *dpage;
    743	int ret = 0;
    744
    745	memset(&mig, 0, sizeof(mig));
    746	mig.vma = vma;
    747	mig.start = start;
    748	mig.end = end;
    749	mig.src = &src_pfn;
    750	mig.dst = &dst_pfn;
    751	mig.flags = MIGRATE_VMA_SELECT_SYSTEM;
    752
    753	ret = migrate_vma_setup(&mig);
    754	if (ret)
    755		return ret;
    756
    757	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
    758		ret = -1;
    759		goto out_finalize;
    760	}
    761
    762	dpage = kvmppc_uvmem_get_page(gpa, kvm);
    763	if (!dpage) {
    764		ret = -1;
    765		goto out_finalize;
    766	}
    767
    768	if (pagein) {
    769		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
    770		spage = migrate_pfn_to_page(*mig.src);
    771		if (spage) {
    772			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
    773					gpa, 0, page_shift);
    774			if (ret)
    775				goto out_finalize;
    776		}
    777	}
    778
    779	*mig.dst = migrate_pfn(page_to_pfn(dpage));
    780	migrate_vma_pages(&mig);
    781out_finalize:
    782	migrate_vma_finalize(&mig);
    783	return ret;
    784}
    785
    786static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
    787		const struct kvm_memory_slot *memslot)
    788{
    789	unsigned long gfn = memslot->base_gfn;
    790	struct vm_area_struct *vma;
    791	unsigned long start, end;
    792	int ret = 0;
    793
    794	mmap_read_lock(kvm->mm);
    795	mutex_lock(&kvm->arch.uvmem_lock);
    796	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
    797		ret = H_STATE;
    798		start = gfn_to_hva(kvm, gfn);
    799		if (kvm_is_error_hva(start))
    800			break;
    801
    802		end = start + (1UL << PAGE_SHIFT);
    803		vma = find_vma_intersection(kvm->mm, start, end);
    804		if (!vma || vma->vm_start > start || vma->vm_end < end)
    805			break;
    806
    807		ret = kvmppc_svm_page_in(vma, start, end,
    808				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
    809		if (ret) {
    810			ret = H_STATE;
    811			break;
    812		}
    813
    814		/* relinquish the cpu if needed */
    815		cond_resched();
    816	}
    817	mutex_unlock(&kvm->arch.uvmem_lock);
    818	mmap_read_unlock(kvm->mm);
    819	return ret;
    820}
    821
    822unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
    823{
    824	struct kvm_memslots *slots;
    825	struct kvm_memory_slot *memslot;
    826	int srcu_idx, bkt;
    827	long ret = H_SUCCESS;
    828
    829	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
    830		return H_UNSUPPORTED;
    831
    832	/* migrate any unmoved normal pfns to device pfns */
    833	srcu_idx = srcu_read_lock(&kvm->srcu);
    834	slots = kvm_memslots(kvm);
    835	kvm_for_each_memslot(memslot, bkt, slots) {
    836		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
    837		if (ret) {
    838			/*
    839			 * The pages will remain transitioned.
    840			 * It's the caller's responsibility to
    841			 * terminate the VM, which will undo
    842			 * all state of the VM. Until then
    843			 * this VM is in an erroneous state.
    844			 * Its KVMPPC_SECURE_INIT_DONE will
    845			 * remain unset.
    846			 */
    847			ret = H_STATE;
    848			goto out;
    849		}
    850	}
    851
    852	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
    853	pr_info("LPID %d went secure\n", kvm->arch.lpid);
    854
    855out:
    856	srcu_read_unlock(&kvm->srcu, srcu_idx);
    857	return ret;
    858}
    859
    860/*
    861 * Shares the page with HV, thus making it a normal page.
    862 *
    863 * - If the page is already secure, then provision a new page and share
    864 * - If the page is a normal page, share the existing page
    865 *
    866 * In the former case, the dev_pagemap_ops.migrate_to_ram handler is used
    867 * to unmap the device page from QEMU's page tables.
    868 */
    869static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
    870		unsigned long page_shift)
    871{
    872
    873	int ret = H_PARAMETER;
    874	struct page *uvmem_page;
    875	struct kvmppc_uvmem_page_pvt *pvt;
    876	unsigned long pfn;
    877	unsigned long gfn = gpa >> page_shift;
    878	int srcu_idx;
    879	unsigned long uvmem_pfn;
    880
    881	srcu_idx = srcu_read_lock(&kvm->srcu);
    882	mutex_lock(&kvm->arch.uvmem_lock);
    883	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
    884		uvmem_page = pfn_to_page(uvmem_pfn);
    885		pvt = uvmem_page->zone_device_data;
    886		pvt->skip_page_out = true;
    887		/*
    888		 * do not drop the GFN. It is a valid GFN
    889		 * that is transitioned to a shared GFN.
    890		 */
    891		pvt->remove_gfn = false;
    892	}
    893
    894retry:
    895	mutex_unlock(&kvm->arch.uvmem_lock);
    896	pfn = gfn_to_pfn(kvm, gfn);
    897	if (is_error_noslot_pfn(pfn))
    898		goto out;
    899
    900	mutex_lock(&kvm->arch.uvmem_lock);
    901	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
    902		uvmem_page = pfn_to_page(uvmem_pfn);
    903		pvt = uvmem_page->zone_device_data;
    904		pvt->skip_page_out = true;
    905		pvt->remove_gfn = false; /* it continues to be a valid GFN */
    906		kvm_release_pfn_clean(pfn);
    907		goto retry;
    908	}
    909
    910	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
    911				page_shift)) {
    912		kvmppc_gfn_shared(gfn, kvm);
    913		ret = H_SUCCESS;
    914	}
    915	kvm_release_pfn_clean(pfn);
    916	mutex_unlock(&kvm->arch.uvmem_lock);
    917out:
    918	srcu_read_unlock(&kvm->srcu, srcu_idx);
    919	return ret;
    920}
    921
    922/*
    923 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
    924 *
    925 * The H_PAGE_IN_SHARED flag makes the page shared, which means that the
    926 * same memory is visible from both UV and HV.
    927 */
    928unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
    929		unsigned long flags,
    930		unsigned long page_shift)
    931{
    932	unsigned long start, end;
    933	struct vm_area_struct *vma;
    934	int srcu_idx;
    935	unsigned long gfn = gpa >> page_shift;
    936	int ret;
    937
    938	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
    939		return H_UNSUPPORTED;
    940
    941	if (page_shift != PAGE_SHIFT)
    942		return H_P3;
    943
    944	if (flags & ~H_PAGE_IN_SHARED)
    945		return H_P2;
    946
    947	if (flags & H_PAGE_IN_SHARED)
    948		return kvmppc_share_page(kvm, gpa, page_shift);
    949
    950	ret = H_PARAMETER;
    951	srcu_idx = srcu_read_lock(&kvm->srcu);
    952	mmap_read_lock(kvm->mm);
    953
    954	start = gfn_to_hva(kvm, gfn);
    955	if (kvm_is_error_hva(start))
    956		goto out;
    957
    958	mutex_lock(&kvm->arch.uvmem_lock);
    959	/* Fail the page-in request of an already paged-in page */
    960	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
    961		goto out_unlock;
    962
    963	end = start + (1UL << page_shift);
    964	vma = find_vma_intersection(kvm->mm, start, end);
    965	if (!vma || vma->vm_start > start || vma->vm_end < end)
    966		goto out_unlock;
    967
    968	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
    969				true))
    970		goto out_unlock;
    971
    972	ret = H_SUCCESS;
    973
    974out_unlock:
    975	mutex_unlock(&kvm->arch.uvmem_lock);
    976out:
    977	mmap_read_unlock(kvm->mm);
    978	srcu_read_unlock(&kvm->srcu, srcu_idx);
    979	return ret;
    980}
    981
    982
    983/*
    984 * Fault handler callback that gets called when HV touches any page that
    985 * has been moved to secure memory. We ask UV to give back the page by
    986 * issuing a UV_PAGE_OUT uvcall.
    987 *
    988 * This eventually results in dropping of the device PFN, and the newly
    989 * provisioned page/PFN gets populated in QEMU's page tables.
    990 */
    991static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
    992{
    993	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;
    994
    995	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
    996				vmf->address + PAGE_SIZE, PAGE_SHIFT,
    997				pvt->kvm, pvt->gpa))
    998		return VM_FAULT_SIGBUS;
    999	else
   1000		return 0;
   1001}
   1002
   1003/*
   1004 * Release the device PFN back to the pool
   1005 *
   1006 * Gets called when a secure GFN transitions from a secure-PFN
   1007 * to a normal PFN during H_SVM_PAGE_OUT.
   1008 * Gets called with kvm->arch.uvmem_lock held.
   1009 */
   1010static void kvmppc_uvmem_page_free(struct page *page)
   1011{
   1012	unsigned long pfn = page_to_pfn(page) -
   1013			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
   1014	struct kvmppc_uvmem_page_pvt *pvt;
   1015
   1016	spin_lock(&kvmppc_uvmem_bitmap_lock);
   1017	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
   1018	spin_unlock(&kvmppc_uvmem_bitmap_lock);
   1019
   1020	pvt = page->zone_device_data;
   1021	page->zone_device_data = NULL;
   1022	if (pvt->remove_gfn)
   1023		kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
   1024	else
   1025		kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
   1026	kfree(pvt);
   1027}
   1028
   1029static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
   1030	.page_free = kvmppc_uvmem_page_free,
   1031	.migrate_to_ram	= kvmppc_uvmem_migrate_to_ram,
   1032};
   1033
   1034/*
   1035 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
   1036 */
   1037unsigned long
   1038kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
   1039		      unsigned long flags, unsigned long page_shift)
   1040{
   1041	unsigned long gfn = gpa >> page_shift;
   1042	unsigned long start, end;
   1043	struct vm_area_struct *vma;
   1044	int srcu_idx;
   1045	int ret;
   1046
   1047	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
   1048		return H_UNSUPPORTED;
   1049
   1050	if (page_shift != PAGE_SHIFT)
   1051		return H_P3;
   1052
   1053	if (flags)
   1054		return H_P2;
   1055
   1056	ret = H_PARAMETER;
   1057	srcu_idx = srcu_read_lock(&kvm->srcu);
   1058	mmap_read_lock(kvm->mm);
   1059	start = gfn_to_hva(kvm, gfn);
   1060	if (kvm_is_error_hva(start))
   1061		goto out;
   1062
   1063	end = start + (1UL << page_shift);
   1064	vma = find_vma_intersection(kvm->mm, start, end);
   1065	if (!vma || vma->vm_start > start || vma->vm_end < end)
   1066		goto out;
   1067
   1068	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
   1069		ret = H_SUCCESS;
   1070out:
   1071	mmap_read_unlock(kvm->mm);
   1072	srcu_read_unlock(&kvm->srcu, srcu_idx);
   1073	return ret;
   1074}
   1075
   1076int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
   1077{
   1078	unsigned long pfn;
   1079	int ret = U_SUCCESS;
   1080
   1081	pfn = gfn_to_pfn(kvm, gfn);
   1082	if (is_error_noslot_pfn(pfn))
   1083		return -EFAULT;
   1084
   1085	mutex_lock(&kvm->arch.uvmem_lock);
   1086	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
   1087		goto out;
   1088
   1089	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
   1090			 0, PAGE_SHIFT);
   1091out:
   1092	kvm_release_pfn_clean(pfn);
   1093	mutex_unlock(&kvm->arch.uvmem_lock);
   1094	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
   1095}
   1096
   1097int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
   1098{
   1099	int ret = __kvmppc_uvmem_memslot_create(kvm, new);
   1100
   1101	if (!ret)
   1102		ret = kvmppc_uv_migrate_mem_slot(kvm, new);
   1103
   1104	return ret;
   1105}
   1106
   1107void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
   1108{
   1109	__kvmppc_uvmem_memslot_delete(kvm, old);
   1110}
   1111
   1112static u64 kvmppc_get_secmem_size(void)
   1113{
   1114	struct device_node *np;
   1115	int i, len;
   1116	const __be32 *prop;
   1117	u64 size = 0;
   1118
   1119	/*
   1120	 * First try the new ibm,secure-memory nodes, which supersede the
   1121	 * secure-memory-ranges property.
   1122	 * If we find some, there is no need to read the deprecated property.
   1123	 */
   1124	for_each_compatible_node(np, NULL, "ibm,secure-memory") {
   1125		prop = of_get_property(np, "reg", &len);
   1126		if (!prop)
   1127			continue;
   1128		size += of_read_number(prop + 2, 2);
   1129	}
   1130	if (size)
   1131		return size;
   1132
   1133	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
   1134	if (!np)
   1135		goto out;
   1136
   1137	prop = of_get_property(np, "secure-memory-ranges", &len);
   1138	if (!prop)
   1139		goto out_put;
   1140
   1141	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
   1142		size += of_read_number(prop + (i * 4) + 2, 2);
   1143
   1144out_put:
   1145	of_node_put(np);
   1146out:
   1147	return size;
   1148}
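
/*
 * A minimal sketch (illustration only, compiled out) of the cell layout
 * assumed by kvmppc_get_secmem_size() above: each secure-memory range is
 * four 32-bit cells, <addr_hi addr_lo size_hi size_lo>, so the 64-bit size
 * is read from cells 2-3. The helper name and its "cells" parameter are
 * made up for the example.
 */
#if 0
static u64 example_range_size(const __be32 *cells)
{
	/* cells[0..1]: 64-bit base address, cells[2..3]: 64-bit size */
	return of_read_number(cells + 2, 2);
}
#endif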
   1149
   1150int kvmppc_uvmem_init(void)
   1151{
   1152	int ret = 0;
   1153	unsigned long size;
   1154	struct resource *res;
   1155	void *addr;
   1156	unsigned long pfn_last, pfn_first;
   1157
   1158	size = kvmppc_get_secmem_size();
   1159	if (!size) {
   1160		/*
   1161		 * Don't fail the initialization of the kvm-hv module if
   1162		 * the platform doesn't export the ibm,uv-firmware node.
   1163		 * Let normal guests run on such a PEF-disabled platform.
   1164		 */
   1165		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
   1166		goto out;
   1167	}
   1168
   1169	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
   1170	if (IS_ERR(res)) {
   1171		ret = PTR_ERR(res);
   1172		goto out;
   1173	}
   1174
   1175	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
   1176	kvmppc_uvmem_pgmap.range.start = res->start;
   1177	kvmppc_uvmem_pgmap.range.end = res->end;
   1178	kvmppc_uvmem_pgmap.nr_range = 1;
   1179	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
   1180	/* just one global instance: */
   1181	kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
   1182	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
   1183	if (IS_ERR(addr)) {
   1184		ret = PTR_ERR(addr);
   1185		goto out_free_region;
   1186	}
   1187
   1188	pfn_first = res->start >> PAGE_SHIFT;
   1189	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
   1190	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
   1191				      sizeof(unsigned long), GFP_KERNEL);
   1192	if (!kvmppc_uvmem_bitmap) {
   1193		ret = -ENOMEM;
   1194		goto out_unmap;
   1195	}
   1196
   1197	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
   1198	return ret;
   1199out_unmap:
   1200	memunmap_pages(&kvmppc_uvmem_pgmap);
   1201out_free_region:
   1202	release_mem_region(res->start, size);
   1203out:
   1204	return ret;
   1205}
   1206
   1207void kvmppc_uvmem_free(void)
   1208{
   1209	if (!kvmppc_uvmem_bitmap)
   1210		return;
   1211
   1212	memunmap_pages(&kvmppc_uvmem_pgmap);
   1213	release_mem_region(kvmppc_uvmem_pgmap.range.start,
   1214			   range_len(&kvmppc_uvmem_pgmap.range));
   1215	kfree(kvmppc_uvmem_bitmap);
   1216}