// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 */

/*
 * A pseries guest can be run as a secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver will be used to manage
 * the movement of guest pages between the normal memory managed by
 * the hypervisor (HV) and secure memory managed by the Ultravisor (UV).
 *
 * The page-in or page-out requests from UV will come to HV as hcalls and
 * HV will call back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc) are
 * shared between UV and HV. However, such pages aren't represented by
 * device private memory and mappings to shared memory exist in both
 * UV and HV page tables.
 */
/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * migrate_vma routines and page-in/out routines.
 *
 * Per-guest mutex comes with a cost though. Mainly it serializes the
 * fault path as page-out can occur when HV faults on accessing secure
 * guest pages. Currently UV issues page-in requests for all the guest
 * PFNs one at a time during early boot (UV_ESM uvcall), so this is
 * not a cause for concern. Also, currently the number of page-outs caused
 * by HV touching secure pages is very low. If and when UV supports
 * overcommitting, then we might see concurrent guest-driven page-outs.
 *
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots, thus acting
 *               as sync-points for page-in/out
 */
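
/*
 * Illustrative sketch (not actual driver code) of the nesting that the
 * locking order above implies on a typical page-in/page-out path:
 *
 *      srcu_idx = srcu_read_lock(&kvm->srcu);
 *      mmap_read_lock(kvm->mm);
 *      mutex_lock(&kvm->arch.uvmem_lock);
 *      ...migrate the page...
 *      mutex_unlock(&kvm->arch.uvmem_lock);
 *      mmap_read_unlock(kvm->mm);
 *      srcu_read_unlock(&kvm->srcu, srcu_idx);
 */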

/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE (64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for one 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever guest shares a secure page, UV will split and
 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
 *
 * HV invalidating a page: When a regular page belonging to secure
 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles page fault of a page belonging
 * to secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page-sizes, we need to break this assumption.
 */
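
/*
 * For reference, PAGE_SHIFT is 16 on 64K-page kernels, so the GPA/GFN
 * conversions used throughout this file reduce to:
 *
 *      gfn = gpa >> PAGE_SHIFT;        (e.g. GPA 0x10000 is GFN 1)
 *      gpa = gfn << PAGE_SHIFT;
 */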

#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <linux/of.h>
#include <linux/memremap.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

/*
 * States of a GFN
 * ---------------
 * The GFN can be in one of the following states.
 *
 * (a) Secure - The GFN is secure. The GFN is associated with
 *  a Secure VM, the contents of the GFN are not accessible
 *  to the Hypervisor.  This GFN can be backed by a secure-PFN,
 *  or can be backed by a normal-PFN with contents encrypted.
 *  The former is true when the GFN is paged-in to the
 *  ultravisor. The latter is true when the GFN is paged-out
 *  of the ultravisor.
 *
 * (b) Shared - The GFN is shared. The GFN is associated with a
 *  secure VM. The contents of the GFN are accessible to the
 *  Hypervisor. This GFN is backed by a normal-PFN and its
 *  content is un-encrypted.
 *
 * (c) Normal - The GFN is normal. The GFN is associated with
 *  a normal VM. The contents of the GFN are accessible to
 *  the Hypervisor. Its content is never encrypted.
 *
 * States of a VM.
 * ---------------
 *
 * Normal VM:  A VM whose contents are always accessible to
 *  the hypervisor.  All its GFNs are normal-GFNs.
 *
 * Secure VM: A VM whose contents are not accessible to the
 *  hypervisor without the VM's consent.  Its GFNs are
 *  either Shared-GFNs or Secure-GFNs.
 *
 * Transient VM: A Normal VM that is transitioning to secure VM.
 *  The transition starts on successful return of
 *  H_SVM_INIT_START, and ends on successful return
 *  of H_SVM_INIT_DONE. This transient VM can have GFNs
 *  in any of the three states; i.e. Secure-GFN, Shared-GFN,
 *  and Normal-GFN. The VM never executes in this state
 *  in supervisor-mode.
 *
 * Memory slot State.
 * ------------------
 *  The state of a memory slot mirrors the state of the
 *  VM the memory slot is associated with.
 *
 * VM State transition.
 * --------------------
 *
 *  A VM always starts in Normal Mode.
 *
 *  H_SVM_INIT_START moves the VM into transient state. During this
 *  time the Ultravisor may request some of its GFNs to be shared or
 *  secured. So its GFNs can be in one of the three GFN states.
 *
 *  H_SVM_INIT_DONE moves the VM entirely from transient state to
 *  secure-state. At this point any left-over normal-GFNs are
 *  transitioned to Secure-GFNs.
 *
 *  H_SVM_INIT_ABORT moves the transient VM back to normal VM.
 *  All its GFNs are moved to Normal-GFNs.
 *
 *  UV_SVM_TERMINATE transitions the secure-VM back to normal-VM. All
 *  the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
 *  Note: The contents of the normal-GFN is undefined at this point.
 *
 * GFN state implementation:
 * -------------------------
 *
 * Secure GFN is associated with a secure-PFN; also called uvmem_pfn,
 * when the GFN is paged-in. Its pfn[] has KVMPPC_GFN_UVMEM_PFN flag
 * set, and contains the value of the secure-PFN.
 * It is associated with a normal-PFN; also called mem_pfn, when
 * the GFN is paged-out. Its pfn[] has KVMPPC_GFN_MEM_PFN flag set.
 * The value of the normal-PFN is not tracked.
 *
 * Shared GFN is associated with a normal-PFN. Its pfn[] has
 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
 * is not tracked.
 *
 * Normal GFN is associated with a normal-PFN. Its pfn[] has
 * no flag set. The value of the normal-PFN is not tracked.
 *
 * Life cycle of a GFN
 * --------------------
 *
 * --------------------------------------------------------------
 * |        |     Share  |  Unshare | SVM       |H_SVM_INIT_DONE|
 * |        |operation   |operation | abort/    |               |
 * |        |            |          | terminate |               |
 * --------------------------------------------------------------
 * |        |            |          |           |               |
 * | Secure |     Shared | Secure   |Normal     |Secure         |
 * |        |            |          |           |               |
 * | Shared |     Shared | Secure   |Normal     |Shared         |
 * |        |            |          |           |               |
 * | Normal |     Shared | Secure   |Normal     |Secure         |
 * --------------------------------------------------------------
 *
 * Life cycle of a VM
 * --------------------
 *
 * --------------------------------------------------------------------
 * |         |  start    |  H_SVM_  |H_SVM_   |H_SVM_     |UV_SVM_    |
 * |         |  VM       |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE  |
 * |         |           |          |         |           |           |
 * --------------------------------------------------------------------
 * |         |           |          |         |           |           |
 * | Normal  | Normal    | Transient|Error    |Error      |Normal     |
 * |         |           |          |         |           |           |
 * | Secure  |   Error   | Error    |Error    |Error      |Normal     |
 * |         |           |          |         |           |           |
 * |Transient|   N/A     | Error    |Secure   |Normal     |Normal     |
 * --------------------------------------------------------------------
 */

#define KVMPPC_GFN_UVMEM_PFN    (1UL << 63)
#define KVMPPC_GFN_MEM_PFN      (1UL << 62)
#define KVMPPC_GFN_SHARED       (1UL << 61)
#define KVMPPC_GFN_SECURE       (KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK    (KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK     (~KVMPPC_GFN_FLAG_MASK)

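/*
 * Example (illustrative values): a secure GFN currently backed by
 * device PFN 0x1234 is recorded in its slot's pfns[] array as
 *
 *      p->pfns[index] = KVMPPC_GFN_UVMEM_PFN | 0x1234;
 *
 * and the device PFN is later recovered with
 *
 *      uvmem_pfn = p->pfns[index] & KVMPPC_GFN_PFN_MASK;
 */
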
struct kvmppc_uvmem_slot {
    struct list_head list;
    unsigned long nr_pfns;
    unsigned long base_pfn;
    unsigned long *pfns;
};
struct kvmppc_uvmem_page_pvt {
    struct kvm *kvm;
    unsigned long gpa;
    bool skip_page_out;
    bool remove_gfn;
};

bool kvmppc_uvmem_available(void)
{
    /*
     * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
     * and our data structures have been initialized successfully.
     */
    return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
    struct kvmppc_uvmem_slot *p;

    p = kzalloc(sizeof(*p), GFP_KERNEL);
    if (!p)
        return -ENOMEM;
    p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
    if (!p->pfns) {
        kfree(p);
        return -ENOMEM;
    }
    p->nr_pfns = slot->npages;
    p->base_pfn = slot->base_gfn;

    mutex_lock(&kvm->arch.uvmem_lock);
    list_add(&p->list, &kvm->arch.uvmem_pfns);
    mutex_unlock(&kvm->arch.uvmem_lock);

    return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
    struct kvmppc_uvmem_slot *p, *next;

    mutex_lock(&kvm->arch.uvmem_lock);
    list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
        if (p->base_pfn == slot->base_gfn) {
            vfree(p->pfns);
            list_del(&p->list);
            kfree(p);
            break;
        }
    }
    mutex_unlock(&kvm->arch.uvmem_lock);
}

static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
            unsigned long flag, unsigned long uvmem_pfn)
{
    struct kvmppc_uvmem_slot *p;

    list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
        if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
            unsigned long index = gfn - p->base_pfn;

            if (flag == KVMPPC_GFN_UVMEM_PFN)
                p->pfns[index] = uvmem_pfn | flag;
            else
                p->pfns[index] = flag;
            return;
        }
    }
}

/* mark the GFN as a secure-GFN associated with @uvmem_pfn, a device PFN. */
static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
            unsigned long uvmem_pfn, struct kvm *kvm)
{
    kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
}

/* mark the GFN as a secure-GFN associated with a memory-PFN. */
static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
{
    kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
}

/* mark the GFN as a shared GFN. */
static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
{
    kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
}

/* mark the GFN as a non-existent GFN. */
static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
{
    kvmppc_mark_gfn(gfn, kvm, 0, 0);
}

/* return true if the GFN is a secure-GFN backed by a secure-PFN */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
                    unsigned long *uvmem_pfn)
{
    struct kvmppc_uvmem_slot *p;

    list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
        if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
            unsigned long index = gfn - p->base_pfn;

            if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
                if (uvmem_pfn)
                    *uvmem_pfn = p->pfns[index] &
                             KVMPPC_GFN_PFN_MASK;
                return true;
            } else {
                return false;
            }
        }
    }
    return false;
}

/*
 * Starting from *gfn, search for the next available GFN that has not yet
 * transitioned to a secure GFN. Return the value of that GFN in *gfn. If
 * such a GFN is found, return true, else return false.
 *
 * Must be called with kvm->arch.uvmem_lock held.
 */
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
        struct kvm *kvm, unsigned long *gfn)
{
    struct kvmppc_uvmem_slot *p = NULL, *iter;
    bool ret = false;
    unsigned long i;

    list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
        if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
            p = iter;
            break;
        }
    if (!p)
        return ret;
    /*
     * The code below assumes a one-to-one correspondence between
     * kvmppc_uvmem_slot and memslot.
     */
    for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
        unsigned long index = i - p->base_pfn;

        if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
            *gfn = i;
            ret = true;
            break;
        }
    }
    return ret;
}

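/*
 * Advise KSM to merge or unmerge all VMAs backing @memslot. Merging is
 * switched off while the guest transitions to secure, since secure
 * guest pages must not be KSM-merged, and switched back on when the
 * memslot leaves secure use.
 */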
static int kvmppc_memslot_page_merge(struct kvm *kvm,
        const struct kvm_memory_slot *memslot, bool merge)
{
    unsigned long gfn = memslot->base_gfn;
    unsigned long end, start = gfn_to_hva(kvm, gfn);
    int ret = 0;
    struct vm_area_struct *vma;
    int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;

    if (kvm_is_error_hva(start))
        return H_STATE;

    end = start + (memslot->npages << PAGE_SHIFT);

    mmap_write_lock(kvm->mm);
    do {
        vma = find_vma_intersection(kvm->mm, start, end);
        if (!vma) {
            ret = H_STATE;
            break;
        }
        ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
              merge_flag, &vma->vm_flags);
        if (ret) {
            ret = H_STATE;
            break;
        }
        start = vma->vm_end;
    } while (end > vma->vm_end);

    mmap_write_unlock(kvm->mm);
    return ret;
}

static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
        const struct kvm_memory_slot *memslot)
{
    uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
    kvmppc_uvmem_slot_free(kvm, memslot);
    kvmppc_memslot_page_merge(kvm, memslot, true);
}

static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
        const struct kvm_memory_slot *memslot)
{
    int ret = H_PARAMETER;

    if (kvmppc_memslot_page_merge(kvm, memslot, false))
        return ret;

    if (kvmppc_uvmem_slot_init(kvm, memslot))
        goto out1;

    ret = uv_register_mem_slot(kvm->arch.lpid,
                   memslot->base_gfn << PAGE_SHIFT,
                   memslot->npages * PAGE_SIZE,
                   0, memslot->id);
    if (ret < 0) {
        ret = H_PARAMETER;
        goto out;
    }
    return 0;
out:
    kvmppc_uvmem_slot_free(kvm, memslot);
out1:
    kvmppc_memslot_page_merge(kvm, memslot, true);
    return ret;
}

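/*
 * H_SVM_INIT_START handler: begin the transition of a normal VM into a
 * secure VM. KSM merging is disabled and every memslot is registered
 * with the ultravisor; on failure, the slots created so far are rolled
 * back.
 */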
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
    struct kvm_memslots *slots;
    struct kvm_memory_slot *memslot, *m;
    int ret = H_SUCCESS;
    int srcu_idx, bkt;

    kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

    if (!kvmppc_uvmem_bitmap)
        return H_UNSUPPORTED;

    /* Only radix guests can be secure guests */
    if (!kvm_is_radix(kvm))
        return H_UNSUPPORTED;

    /* NAK the transition to secure if not enabled */
    if (!kvm->arch.svm_enabled)
        return H_AUTHORITY;

    srcu_idx = srcu_read_lock(&kvm->srcu);

    /* register the memslots */
    slots = kvm_memslots(kvm);
    kvm_for_each_memslot(memslot, bkt, slots) {
        ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
        if (ret)
            break;
    }

    if (ret) {
        slots = kvm_memslots(kvm);
        /* undo the memslots created before the failing one */
        kvm_for_each_memslot(m, bkt, slots) {
            if (m == memslot)
                break;
            __kvmppc_uvmem_memslot_delete(kvm, m);
        }
    }

    srcu_read_unlock(&kvm->srcu, srcu_idx);
    return ret;
}

/*
 * Provision a new page on HV side and copy over the contents
 * from secure memory using UV_PAGE_OUT uvcall.
 * Caller must hold kvm->arch.uvmem_lock.
 */
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
        unsigned long start,
        unsigned long end, unsigned long page_shift,
        struct kvm *kvm, unsigned long gpa)
{
    unsigned long src_pfn, dst_pfn = 0;
    struct migrate_vma mig;
    struct page *dpage, *spage;
    struct kvmppc_uvmem_page_pvt *pvt;
    unsigned long pfn;
    int ret = U_SUCCESS;

    memset(&mig, 0, sizeof(mig));
    mig.vma = vma;
    mig.start = start;
    mig.end = end;
    mig.src = &src_pfn;
    mig.dst = &dst_pfn;
    mig.pgmap_owner = &kvmppc_uvmem_pgmap;
    mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

    /* The requested page is already paged-out, nothing to do */
    if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
        return ret;

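    /*
     * migrate_vma works in three phases: migrate_vma_setup() collects
     * and unmaps the source page, migrate_vma_pages() moves it to the
     * destination page supplied in mig.dst, and migrate_vma_finalize()
     * restores the original mapping if the migration was abandoned.
     */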
    ret = migrate_vma_setup(&mig);
    if (ret)
        return -1;

    spage = migrate_pfn_to_page(*mig.src);
    if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
        goto out_finalize;

    if (!is_zone_device_page(spage))
        goto out_finalize;

    dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
    if (!dpage) {
        ret = -1;
        goto out_finalize;
    }

    lock_page(dpage);
    pvt = spage->zone_device_data;
    pfn = page_to_pfn(dpage);

    /*
     * This function is used in two cases:
     * - When HV touches a secure page, for which we do UV_PAGE_OUT
     * - When a secure page is converted to shared page, we *get*
     *   the page to essentially unmap the device page. In this
     *   case we skip page-out.
     */
    if (!pvt->skip_page_out)
        ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
                  gpa, 0, page_shift);

    if (ret == U_SUCCESS) {
        *mig.dst = migrate_pfn(pfn);
    } else {
        unlock_page(dpage);
        __free_page(dpage);
        goto out_finalize;
    }

    migrate_vma_pages(&mig);

out_finalize:
    migrate_vma_finalize(&mig);
    return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end,
                      unsigned long page_shift,
                      struct kvm *kvm, unsigned long gpa)
{
    int ret;

    mutex_lock(&kvm->arch.uvmem_lock);
    ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
    mutex_unlock(&kvm->arch.uvmem_lock);

    return ret;
}

/*
 * Drop device pages that we maintain for the secure guest
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is an HV-side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do fault-time migration to replace the device PTEs in
 * the QEMU page table with normal PTEs from newly allocated pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
                 struct kvm *kvm, bool skip_page_out)
{
    int i;
    struct kvmppc_uvmem_page_pvt *pvt;
    struct page *uvmem_page;
    struct vm_area_struct *vma = NULL;
    unsigned long uvmem_pfn, gfn;
    unsigned long addr;

    mmap_read_lock(kvm->mm);

    addr = slot->userspace_addr;

    gfn = slot->base_gfn;
    for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

        /* Fetch the VMA if addr is not in the latest fetched one */
        if (!vma || addr >= vma->vm_end) {
            vma = vma_lookup(kvm->mm, addr);
            if (!vma) {
                pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
                break;
            }
        }

        mutex_lock(&kvm->arch.uvmem_lock);

        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
            uvmem_page = pfn_to_page(uvmem_pfn);
            pvt = uvmem_page->zone_device_data;
            pvt->skip_page_out = skip_page_out;
            pvt->remove_gfn = true;

            if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
                          PAGE_SHIFT, kvm, pvt->gpa))
                pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
                       pvt->gpa, addr);
        } else {
            /* Remove the shared flag if any */
            kvmppc_gfn_remove(gfn, kvm);
        }

        mutex_unlock(&kvm->arch.uvmem_lock);
    }

    mmap_read_unlock(kvm->mm);
}

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
    int srcu_idx, bkt;
    struct kvm_memory_slot *memslot;

    /*
     * Expect to be called only after INIT_START and before INIT_DONE.
     * If INIT_DONE was completed, use normal VM termination sequence.
     */
    if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
        return H_UNSUPPORTED;

    if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
        return H_STATE;

    srcu_idx = srcu_read_lock(&kvm->srcu);

    kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
        kvmppc_uvmem_drop_pages(memslot, kvm, false);

    srcu_read_unlock(&kvm->srcu, srcu_idx);

    kvm->arch.secure_guest = 0;
    uv_svm_terminate(kvm->arch.lpid);

    return H_PARAMETER;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device
 * PFN will be used to keep track of the secure page on HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
    struct page *dpage = NULL;
    unsigned long bit, uvmem_pfn;
    struct kvmppc_uvmem_page_pvt *pvt;
    unsigned long pfn_last, pfn_first;

    pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
    pfn_last = pfn_first +
           (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);

    spin_lock(&kvmppc_uvmem_bitmap_lock);
    bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
                  pfn_last - pfn_first);
    if (bit >= (pfn_last - pfn_first))
        goto out;
    bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
    spin_unlock(&kvmppc_uvmem_bitmap_lock);

    pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
    if (!pvt)
        goto out_clear;

    uvmem_pfn = bit + pfn_first;
    kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

    pvt->gpa = gpa;
    pvt->kvm = kvm;

    dpage = pfn_to_page(uvmem_pfn);
    dpage->zone_device_data = pvt;
    lock_page(dpage);
    return dpage;
out_clear:
    spin_lock(&kvmppc_uvmem_bitmap_lock);
    bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
    spin_unlock(&kvmppc_uvmem_bitmap_lock);
    return NULL;
}

/*
 * Alloc a PFN from private device memory pool. If @pagein is true,
 * copy page from normal memory to secure memory using UV_PAGE_IN uvcall.
 */
static int kvmppc_svm_page_in(struct vm_area_struct *vma,
        unsigned long start,
        unsigned long end, unsigned long gpa, struct kvm *kvm,
        unsigned long page_shift,
        bool pagein)
{
    unsigned long src_pfn, dst_pfn = 0;
    struct migrate_vma mig;
    struct page *spage;
    unsigned long pfn;
    struct page *dpage;
    int ret = 0;

    memset(&mig, 0, sizeof(mig));
    mig.vma = vma;
    mig.start = start;
    mig.end = end;
    mig.src = &src_pfn;
    mig.dst = &dst_pfn;
    mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

    ret = migrate_vma_setup(&mig);
    if (ret)
        return ret;

    if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
        ret = -1;
        goto out_finalize;
    }

    dpage = kvmppc_uvmem_get_page(gpa, kvm);
    if (!dpage) {
        ret = -1;
        goto out_finalize;
    }

    if (pagein) {
        pfn = *mig.src >> MIGRATE_PFN_SHIFT;
        spage = migrate_pfn_to_page(*mig.src);
        if (spage) {
            ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
                    gpa, 0, page_shift);
            if (ret)
                goto out_finalize;
        }
    }

    *mig.dst = migrate_pfn(page_to_pfn(dpage));
    migrate_vma_pages(&mig);
out_finalize:
    migrate_vma_finalize(&mig);
    return ret;
}

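/*
 * Migrate every GFN of @memslot that has not yet transitioned to a
 * secure or shared GFN over to device memory. Note that no UV_PAGE_IN
 * is issued here (pagein is false): only the device-PFN tracking is
 * set up for these untouched GFNs.
 */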
static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
        const struct kvm_memory_slot *memslot)
{
    unsigned long gfn = memslot->base_gfn;
    struct vm_area_struct *vma;
    unsigned long start, end;
    int ret = 0;

    mmap_read_lock(kvm->mm);
    mutex_lock(&kvm->arch.uvmem_lock);
    while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
        ret = H_STATE;
        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
            break;

        end = start + (1UL << PAGE_SHIFT);
        vma = find_vma_intersection(kvm->mm, start, end);
        if (!vma || vma->vm_start > start || vma->vm_end < end)
            break;

        ret = kvmppc_svm_page_in(vma, start, end,
                (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
        if (ret) {
            ret = H_STATE;
            break;
        }

        /* relinquish the cpu if needed */
        cond_resched();
    }
    mutex_unlock(&kvm->arch.uvmem_lock);
    mmap_read_unlock(kvm->mm);
    return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
    struct kvm_memslots *slots;
    struct kvm_memory_slot *memslot;
    int srcu_idx, bkt;
    long ret = H_SUCCESS;

    if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
        return H_UNSUPPORTED;

    /* migrate any unmoved normal PFNs to device PFNs */
    srcu_idx = srcu_read_lock(&kvm->srcu);
    slots = kvm_memslots(kvm);
    kvm_for_each_memslot(memslot, bkt, slots) {
        ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
        if (ret) {
            /*
             * The pages will remain transitioned.
             * It is the caller's responsibility to
             * terminate the VM, which will undo
             * all state of the VM. Till then
             * this VM is in an erroneous state.
             * Its KVMPPC_SECURE_INIT_DONE will
             * remain unset.
             */
            ret = H_STATE;
            goto out;
        }
    }

    kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
    pr_info("LPID %d went secure\n", kvm->arch.lpid);

out:
    srcu_read_unlock(&kvm->srcu, srcu_idx);
    return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, the dev_pagemap_ops.migrate_to_ram handler is used
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
        unsigned long page_shift)
{
    int ret = H_PARAMETER;
    struct page *uvmem_page;
    struct kvmppc_uvmem_page_pvt *pvt;
    unsigned long pfn;
    unsigned long gfn = gpa >> page_shift;
    int srcu_idx;
    unsigned long uvmem_pfn;

    srcu_idx = srcu_read_lock(&kvm->srcu);
    mutex_lock(&kvm->arch.uvmem_lock);
    if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
        uvmem_page = pfn_to_page(uvmem_pfn);
        pvt = uvmem_page->zone_device_data;
        pvt->skip_page_out = true;
        /*
         * do not drop the GFN. It is a valid GFN
         * that is transitioned to a shared GFN.
         */
        pvt->remove_gfn = false;
    }

retry:
    mutex_unlock(&kvm->arch.uvmem_lock);
    pfn = gfn_to_pfn(kvm, gfn);
    if (is_error_noslot_pfn(pfn))
        goto out;

    mutex_lock(&kvm->arch.uvmem_lock);
    if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
        uvmem_page = pfn_to_page(uvmem_pfn);
        pvt = uvmem_page->zone_device_data;
        pvt->skip_page_out = true;
        pvt->remove_gfn = false; /* it continues to be a valid GFN */
        kvm_release_pfn_clean(pfn);
        goto retry;
    }

    if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
                page_shift)) {
        kvmppc_gfn_shared(gfn, kvm);
        ret = H_SUCCESS;
    }
    kvm_release_pfn_clean(pfn);
    mutex_unlock(&kvm->arch.uvmem_lock);
out:
    srcu_read_unlock(&kvm->srcu, srcu_idx);
    return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * H_PAGE_IN_SHARED flag makes the page shared, which means that the same
 * memory is visible to both UV and HV.
 */
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
        unsigned long flags,
        unsigned long page_shift)
{
    unsigned long start, end;
    struct vm_area_struct *vma;
    int srcu_idx;
    unsigned long gfn = gpa >> page_shift;
    int ret;

    if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
        return H_UNSUPPORTED;

    if (page_shift != PAGE_SHIFT)
        return H_P3;

    if (flags & ~H_PAGE_IN_SHARED)
        return H_P2;

    if (flags & H_PAGE_IN_SHARED)
        return kvmppc_share_page(kvm, gpa, page_shift);

    ret = H_PARAMETER;
    srcu_idx = srcu_read_lock(&kvm->srcu);
    mmap_read_lock(kvm->mm);

    start = gfn_to_hva(kvm, gfn);
    if (kvm_is_error_hva(start))
        goto out;

    mutex_lock(&kvm->arch.uvmem_lock);
    /* Fail the page-in request of an already paged-in page */
    if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
        goto out_unlock;

    end = start + (1UL << page_shift);
    vma = find_vma_intersection(kvm->mm, start, end);
    if (!vma || vma->vm_start > start || vma->vm_end < end)
        goto out_unlock;

    if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
                true))
        goto out_unlock;

    ret = H_SUCCESS;

out_unlock:
    mutex_unlock(&kvm->arch.uvmem_lock);
out:
    mmap_read_unlock(kvm->mm);
    srcu_read_unlock(&kvm->srcu, srcu_idx);
    return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing a UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of the device PFN, and the newly
 * provisioned page/PFN gets populated in QEMU page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
    struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

    if (kvmppc_svm_page_out(vmf->vma, vmf->address,
                vmf->address + PAGE_SIZE, PAGE_SHIFT,
                pvt->kvm, pvt->gpa))
        return VM_FAULT_SIGBUS;
    else
        return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when a secure GFN transitions from a secure-PFN
 * to a normal PFN during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
    unsigned long pfn = page_to_pfn(page) -
            (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
    struct kvmppc_uvmem_page_pvt *pvt;

    spin_lock(&kvmppc_uvmem_bitmap_lock);
    bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
    spin_unlock(&kvmppc_uvmem_bitmap_lock);

    pvt = page->zone_device_data;
    page->zone_device_data = NULL;
    if (pvt->remove_gfn)
        kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
    else
        kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
    kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
    .page_free = kvmppc_uvmem_page_free,
    .migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};

/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
              unsigned long flags, unsigned long page_shift)
{
    unsigned long gfn = gpa >> page_shift;
    unsigned long start, end;
    struct vm_area_struct *vma;
    int srcu_idx;
    int ret;

    if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
        return H_UNSUPPORTED;

    if (page_shift != PAGE_SHIFT)
        return H_P3;

    if (flags)
        return H_P2;

    ret = H_PARAMETER;
    srcu_idx = srcu_read_lock(&kvm->srcu);
    mmap_read_lock(kvm->mm);
    start = gfn_to_hva(kvm, gfn);
    if (kvm_is_error_hva(start))
        goto out;

    end = start + (1UL << page_shift);
    vma = find_vma_intersection(kvm->mm, start, end);
    if (!vma || vma->vm_start > start || vma->vm_end < end)
        goto out;

    if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
        ret = H_SUCCESS;
out:
    mmap_read_unlock(kvm->mm);
    srcu_read_unlock(&kvm->srcu, srcu_idx);
    return ret;
}

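/*
 * Hand the contents of @gfn to the ultravisor with UV_PAGE_IN, unless
 * the GFN is already secure (backed by a device PFN), in which case
 * there is nothing to send.
 */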
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
    unsigned long pfn;
    int ret = U_SUCCESS;

    pfn = gfn_to_pfn(kvm, gfn);
    if (is_error_noslot_pfn(pfn))
        return -EFAULT;

    mutex_lock(&kvm->arch.uvmem_lock);
    if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
        goto out;

    ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
             0, PAGE_SHIFT);
out:
    kvm_release_pfn_clean(pfn);
    mutex_unlock(&kvm->arch.uvmem_lock);
    return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
{
    int ret = __kvmppc_uvmem_memslot_create(kvm, new);

    if (!ret)
        ret = kvmppc_uv_migrate_mem_slot(kvm, new);

    return ret;
}

void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
{
    __kvmppc_uvmem_memslot_delete(kvm, old);
}

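/*
 * Illustrative device-tree fragment (values invented for the example)
 * of the kind the parser below accepts; with 2 address cells and
 * 2 size cells, the size is read from the last two cells of "reg":
 *
 *      secure-memory@100000000 {
 *              compatible = "ibm,secure-memory";
 *              reg = <0x1 0x0 0x0 0x40000000>;    (1 GB at 4 GB)
 *      };
 */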
static u64 kvmppc_get_secmem_size(void)
{
    struct device_node *np;
    int i, len;
    const __be32 *prop;
    u64 size = 0;

    /*
     * First try the new ibm,secure-memory nodes which supersede the
     * secure-memory-ranges property.
     * If we find some, there is no need to read the deprecated ones.
     */
    for_each_compatible_node(np, NULL, "ibm,secure-memory") {
        prop = of_get_property(np, "reg", &len);
        if (!prop)
            continue;
        size += of_read_number(prop + 2, 2);
    }
    if (size)
        return size;

    np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
    if (!np)
        goto out;

    prop = of_get_property(np, "secure-memory-ranges", &len);
    if (!prop)
        goto out_put;

    for (i = 0; i < len / (sizeof(*prop) * 4); i++)
        size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
    of_node_put(np);
out:
    return size;
}

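/*
 * Module-load time setup: carve out the platform's secure memory as
 * private ZONE_DEVICE memory and allocate the bitmap used to hand out
 * device PFNs. A platform without secure memory (PEF disabled) is not
 * treated as an error; secure-guest support is simply left unavailable.
 */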
int kvmppc_uvmem_init(void)
{
    int ret = 0;
    unsigned long size;
    struct resource *res;
    void *addr;
    unsigned long pfn_last, pfn_first;

    size = kvmppc_get_secmem_size();
    if (!size) {
        /*
         * Don't fail the initialization of the kvm-hv module if
         * the platform doesn't export the ibm,uv-firmware node.
         * Let normal guests run on such a PEF-disabled platform.
         */
        pr_info("KVMPPC-UVMEM: No support for secure guests\n");
        goto out;
    }

    res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
    if (IS_ERR(res)) {
        ret = PTR_ERR(res);
        goto out;
    }

    kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
    kvmppc_uvmem_pgmap.range.start = res->start;
    kvmppc_uvmem_pgmap.range.end = res->end;
    kvmppc_uvmem_pgmap.nr_range = 1;
    kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
    /* just one global instance: */
    kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
    addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
    if (IS_ERR(addr)) {
        ret = PTR_ERR(addr);
        goto out_free_region;
    }

    pfn_first = res->start >> PAGE_SHIFT;
    pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
    kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
                      sizeof(unsigned long), GFP_KERNEL);
    if (!kvmppc_uvmem_bitmap) {
        ret = -ENOMEM;
        goto out_unmap;
    }

    pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
    return ret;
out_unmap:
    memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
    release_mem_region(res->start, size);
out:
    return ret;
}

void kvmppc_uvmem_free(void)
{
    if (!kvmppc_uvmem_bitmap)
        return;

    memunmap_pages(&kvmppc_uvmem_pgmap);
    release_mem_region(kvmppc_uvmem_pgmap.range.start,
               range_len(&kvmppc_uvmem_pgmap.range));
    kfree(kvmppc_uvmem_bitmap);
}