// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

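/*
 * Locking: kvm->gpc_lock (a spinlock) protects kvm->gpc_list, the list of
 * all active caches.  Each cache additionally has two locks of its own:
 * gpc->refresh_lock, a mutex serializing refresh and unmap operations, and
 * gpc->lock, an rwlock taken for write while the cached pfn/khva are being
 * changed and for read by consumers of the mapping.  Where both per-cache
 * locks are needed, refresh_lock is acquired first.
 */
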
/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
                                       unsigned long end, bool may_block)
{
    DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
    struct gfn_to_pfn_cache *gpc;
    bool evict_vcpus = false;

    spin_lock(&kvm->gpc_lock);
    list_for_each_entry(gpc, &kvm->gpc_list, list) {
        write_lock_irq(&gpc->lock);

        /* Only a single page so no need to care about length */
        if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
            gpc->uhva >= start && gpc->uhva < end) {
            gpc->valid = false;

            /*
             * If a guest vCPU could be using the physical address,
             * it needs to be forced out of guest mode.
             */
            if (gpc->usage & KVM_GUEST_USES_PFN) {
                if (!evict_vcpus) {
                    evict_vcpus = true;
                    bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                }
                __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
            }
        }
        write_unlock_irq(&gpc->lock);
    }
    spin_unlock(&kvm->gpc_lock);

    if (evict_vcpus) {
        /*
         * KVM needs to ensure the vCPU is fully out of guest context
         * before allowing the invalidation to continue.
         */
        unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
        bool called;

        /*
         * If the OOM reaper is active, then all vCPUs should have
         * been stopped already, so perform the request without
         * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
         */
        if (!may_block)
            req &= ~KVM_REQUEST_WAIT;

        called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);

        WARN_ON_ONCE(called && !may_block);
    }
}

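/*
 * Check that the cache is still valid for @gpa and @len.  Callers hold
 * gpc->lock for read (at least) across the check and any subsequent access
 * through gpc->khva; a typical consumer loop looks roughly like this
 * (an illustrative sketch, not code from this file):
 *
 *    read_lock_irqsave(&gpc->lock, flags);
 *    while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, len)) {
 *        read_unlock_irqrestore(&gpc->lock, flags);
 *        if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len))
 *            goto fail;
 *        read_lock_irqsave(&gpc->lock, flags);
 *    }
 *    ... access the page via gpc->khva ...
 *    read_unlock_irqrestore(&gpc->lock, flags);
 */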
bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                gpa_t gpa, unsigned long len)
{
    struct kvm_memslots *slots = kvm_memslots(kvm);

    if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
        return false;

    if (gpc->gpa != gpa || gpc->generation != slots->generation ||
        kvm_is_error_hva(gpc->uhva))
        return false;

    if (!gpc->valid)
        return false;

    return true;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);

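/*
 * Undo whichever kind of kernel mapping was established for the pfn:
 * kunmap() pairs with the kmap() in hva_to_pfn_retry() for pfns backed by
 * "struct page", and memunmap() pairs with the memremap() used for pfns
 * that are not (e.g. device memory).
 */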
static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
{
    /* Unmap the old pfn/page if it was mapped before. */
    if (!is_error_noslot_pfn(pfn) && khva) {
        if (pfn_valid(pfn))
            kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
        else
            memunmap(khva);
#endif
    }
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
    /*
     * mn_active_invalidate_count acts for all intents and purposes
     * like mmu_invalidate_in_progress here; but the latter cannot
     * be used here because the invalidation of caches in the
     * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
     * is elevated.
     *
     * Note, it does not matter that mn_active_invalidate_count
     * is not protected by gpc->lock.  It is guaranteed to
     * be elevated before the mmu_notifier acquires gpc->lock, and
     * isn't dropped until after mmu_invalidate_seq is updated.
     */
    if (kvm->mn_active_invalidate_count)
        return true;

    /*
     * Ensure mn_active_invalidate_count is read before
     * mmu_invalidate_seq.  This pairs with the smp_wmb() in
     * mmu_notifier_invalidate_range_end() to guarantee either the
     * old (non-zero) value of mn_active_invalidate_count or the
     * new (incremented) value of mmu_invalidate_seq is observed.
     */
    smp_rmb();
    return kvm->mmu_invalidate_seq != mmu_seq;
}

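/*
 * Resolve gpc->uhva to a pfn (and, if KVM itself uses the page, a kernel
 * mapping), retrying until the result is not invalidated by a concurrent
 * mmu_notifier event.  Called with gpc->refresh_lock held and gpc->lock
 * held for write; gpc->lock is dropped and re-taken around the sleepable
 * work.  Returns 0 on success and -EFAULT on failure, with gpc->lock
 * re-acquired in both cases.
 */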
static int hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
    /* Note, the new page offset may be different than the old! */
    void *old_khva = gpc->khva - offset_in_page(gpc->khva);
    kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
    void *new_khva = NULL;
    unsigned long mmu_seq;

    lockdep_assert_held(&gpc->refresh_lock);

    lockdep_assert_held_write(&gpc->lock);

    /*
     * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
     * assets have already been updated and so a concurrent check() from a
     * different task may not fail the gpa/uhva/generation checks.
     */
    gpc->valid = false;

    do {
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        write_unlock_irq(&gpc->lock);

        /*
         * If the previous iteration "failed" due to an mmu_notifier
         * event, release the pfn and unmap the kernel virtual address
         * from the previous attempt.  Unmapping might sleep, so this
         * needs to be done after dropping the lock.  Opportunistically
         * check for resched while the lock isn't held.
         */
        if (new_pfn != KVM_PFN_ERR_FAULT) {
            /*
             * Keep the mapping if the previous iteration reused
             * the existing mapping and didn't create a new one.
             */
            if (new_khva != old_khva)
                gpc_unmap_khva(kvm, new_pfn, new_khva);

            kvm_release_pfn_clean(new_pfn);

            cond_resched();
        }

        /* We always request a writeable mapping */
        new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
        if (is_error_noslot_pfn(new_pfn))
            goto out_error;

        /*
         * Obtain a new kernel mapping if KVM itself will access the
         * pfn.  Note, kmap() and memremap() can both sleep, so this
         * too must be done outside of gpc->lock!
         */
        if (gpc->usage & KVM_HOST_USES_PFN) {
            if (new_pfn == gpc->pfn) {
                new_khva = old_khva;
            } else if (pfn_valid(new_pfn)) {
                new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
            } else {
                new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
            }
            if (!new_khva) {
                kvm_release_pfn_clean(new_pfn);
                goto out_error;
            }
        }

        write_lock_irq(&gpc->lock);

        /*
         * Other tasks must wait for _this_ refresh to complete before
         * attempting to refresh.
         */
        WARN_ON_ONCE(gpc->valid);
    } while (mmu_notifier_retry_cache(kvm, mmu_seq));

    gpc->valid = true;
    gpc->pfn = new_pfn;
    gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);

    /*
     * Put the reference to the _new_ pfn.  The pfn is now tracked by the
     * cache and can be safely migrated, swapped, etc... as the cache will
     * invalidate any mappings in response to relevant mmu_notifier events.
     */
    kvm_release_pfn_clean(new_pfn);

    return 0;

out_error:
    write_lock_irq(&gpc->lock);

    return -EFAULT;
}

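/*
 * Note: refreshing takes gpc->refresh_lock (a mutex) and may fault the page
 * in via hva_to_pfn(), so it can sleep and must not be called from atomic
 * context or with gpc->lock already held.
 */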
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                 gpa_t gpa, unsigned long len)
{
    struct kvm_memslots *slots = kvm_memslots(kvm);
    unsigned long page_offset = gpa & ~PAGE_MASK;
    kvm_pfn_t old_pfn, new_pfn;
    unsigned long old_uhva;
    void *old_khva;
    int ret = 0;

    /*
     * The region must fit within a single page. The 'len' argument
     * exists only to enforce that.
     */
    if (page_offset + len > PAGE_SIZE)
        return -EINVAL;

    /*
     * If another task is refreshing the cache, wait for it to complete.
     * There is no guarantee that concurrent refreshes will see the same
     * gpa, memslots generation, etc..., so they must be fully serialized.
     */
    mutex_lock(&gpc->refresh_lock);

    write_lock_irq(&gpc->lock);

    old_pfn = gpc->pfn;
    old_khva = gpc->khva - offset_in_page(gpc->khva);
    old_uhva = gpc->uhva;

    /* If the userspace HVA is invalid, refresh that first */
    if (gpc->gpa != gpa || gpc->generation != slots->generation ||
        kvm_is_error_hva(gpc->uhva)) {
        gfn_t gfn = gpa_to_gfn(gpa);

        gpc->gpa = gpa;
        gpc->generation = slots->generation;
        gpc->memslot = __gfn_to_memslot(slots, gfn);
        gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

        if (kvm_is_error_hva(gpc->uhva)) {
            ret = -EFAULT;
            goto out;
        }
    }

    /*
     * If the userspace HVA changed or the PFN was already invalid,
     * drop the lock and do the HVA to PFN lookup again.
     */
    if (!gpc->valid || old_uhva != gpc->uhva) {
        ret = hva_to_pfn_retry(kvm, gpc);
    } else {
        /* If the HVA→PFN mapping was already valid, don't unmap it. */
        old_pfn = KVM_PFN_ERR_FAULT;
        old_khva = NULL;
    }

 out:
    /*
     * Invalidate the cache and purge the pfn/khva if the refresh failed.
     * Some/all of the uhva, gpa, and memslot generation info may still be
     * valid, leave it as is.
     */
    if (ret) {
        gpc->valid = false;
        gpc->pfn = KVM_PFN_ERR_FAULT;
        gpc->khva = NULL;
    }

    /* Snapshot the new pfn before dropping the lock! */
    new_pfn = gpc->pfn;

    write_unlock_irq(&gpc->lock);

    mutex_unlock(&gpc->refresh_lock);

    if (old_pfn != new_pfn)
        gpc_unmap_khva(kvm, old_pfn, old_khva);

    return ret;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh);

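/*
 * Tear down the kernel mapping and pfn for this cache, used both directly
 * and from kvm_gfn_to_pfn_cache_destroy().  The cache stays registered and
 * its gpa=>uhva translation is kept, but the pfn lookup must be redone by
 * a subsequent refresh before the cache can be used again.
 */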
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
    void *old_khva;
    kvm_pfn_t old_pfn;

    mutex_lock(&gpc->refresh_lock);
    write_lock_irq(&gpc->lock);

    gpc->valid = false;

    old_khva = gpc->khva - offset_in_page(gpc->khva);
    old_pfn = gpc->pfn;

    /*
     * We can leave the GPA → uHVA map cache intact but the PFN
     * lookup will need to be redone even for the same page.
     */
    gpc->khva = NULL;
    gpc->pfn = KVM_PFN_ERR_FAULT;

    write_unlock_irq(&gpc->lock);
    mutex_unlock(&gpc->refresh_lock);

    gpc_unmap_khva(kvm, old_pfn, old_khva);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);

int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                              struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
                              gpa_t gpa, unsigned long len)
{
    WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);

    if (!gpc->active) {
        rwlock_init(&gpc->lock);
        mutex_init(&gpc->refresh_lock);

        gpc->khva = NULL;
        gpc->pfn = KVM_PFN_ERR_FAULT;
        gpc->uhva = KVM_HVA_ERR_BAD;
        gpc->vcpu = vcpu;
        gpc->usage = usage;
        gpc->valid = false;
        gpc->active = true;

        spin_lock(&kvm->gpc_lock);
        list_add(&gpc->list, &kvm->gpc_list);
        spin_unlock(&kvm->gpc_lock);
    }
    return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);

void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
    if (gpc->active) {
        spin_lock(&kvm->gpc_lock);
        list_del(&gpc->list);
        spin_unlock(&kvm->gpc_lock);

        kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
        gpc->active = false;
    }
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_destroy);
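
/*
 * Putting it together, a typical cache lifecycle looks roughly like the
 * following (an illustrative sketch only; error handling is elided, and
 * 'gpa', 'len' and 'data' stand in for whatever guest page and payload the
 * caller tracks):
 *
 *    kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa, len);
 *
 *    read_lock_irqsave(&gpc->lock, flags);
 *    if (kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, len))
 *        memcpy(gpc->khva, data, len);
 *    read_unlock_irqrestore(&gpc->lock, flags);
 *
 *    kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
 */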