// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

bool __read_mostly enable_mmio_caching = true;
static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
EXPORT_SYMBOL_GPL(enable_mmio_caching);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask;
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_memtype_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

void __init kvm_mmu_spte_module_init(void)
{
	/*
	 * Snapshot userspace's desire to allow MMIO caching.  Whether or not
	 * KVM can actually enable MMIO caching depends on vendor-specific
	 * hardware capabilities and other module params that can't be resolved
	 * until the vendor module is loaded, i.e. enable_mmio_caching can and
	 * will change when the vendor module is (re)loaded.
	 */
	allow_mmio_caching = enable_mmio_caching;
}

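/*
 * Scatter the memslots generation number into the spare low and high bit
 * ranges that MMIO SPTEs reserve for it; get_mmio_spte_generation() in
 * spte.h performs the inverse transformation.
 */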
static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

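/*
 * Build a non-present SPTE that caches an emulated MMIO access: the GPA,
 * the allowed access bits, and the current memslots generation are all
 * encoded in the SPTE so that future faults on the GFN can be recognized
 * as MMIO without walking the memslots.
 */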
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	spte |= shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}

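/*
 * Heuristic for whether a pfn is MMIO: a valid but reserved pfn whose host
 * PAT memtype is uncacheable, or an invalid pfn (no struct page) that is
 * not backed by an E820 RAM region.
 */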
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the above check misconceives those pages
			 * as MMIO, and results in KVM mapping device memory
			 * with UC memory type, which would hurt performance.
			 *
			 * Therefore, we check the host memory type in addition
			 * and only treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

/*
 * Returns true if the SPTE has bits that may be set without holding mmu_lock.
 * The caller is responsible for checking if the SPTE is shadow-present, and
 * for determining whether or not the caller cares about non-leaf SPTEs.
 */
bool spte_has_volatile_bits(u64 spte)
{
	/*
	 * A read-only SPTE that is still MMU-writable can have its Writable
	 * bit restored by the fast page fault path without holding mmu_lock,
	 * so the bit is volatile until KVM drops the MMU-writable flag too.
	 */
	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
		return true;

	if (is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if (!(spte & shadow_accessed_mask) ||
		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
			return true;
	}

	return false;
}

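/*
 * Construct a leaf SPTE mapping @gfn to @pfn with @pte_access permissions,
 * returning the result via @new_spte.  Returns true if the SPTE had to be
 * write-protected, e.g. because the gfn maps a guest page table that can't
 * be unsync'd, in which case a faulting write must be handled by the caller.
 */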
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte)
{
	int level = sp->role.level;
	u64 spte = SPTE_MMU_PRESENT_MASK;
	bool wrprot = false;

	WARN_ON_ONCE(!pte_access && !shadow_present_mask);

	if (sp->role.ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else if (kvm_mmu_page_ad_need_write_protect(sp))
		spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!prefetch)
		spte |= spte_shadow_accessed_mask(spte);

	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled(vcpu->kvm)) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;

	if (shadow_memtype_mask)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
							 kvm_is_mmio_pfn(pfn));
	if (host_writable)
		spte |= shadow_host_writable_mask;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_value;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (is_writable_pte(old_spte))
			goto out;

		/*
		 * Unsync shadow pages that are reachable by the new, writable
		 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
		 * e.g. it's write-protected for dirty logging or is a guest
		 * pagetable that is being monitored for shadow paging.
		 */
		if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn,
					    can_unsync, prefetch)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			wrprot = true;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

out:
	if (prefetch)
		spte = mark_spte_for_access_track(spte);

	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
		/* Enforced by kvm_mmu_hugepage_adjust. */
		WARN_ON(level > PG_LEVEL_4K);
		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
	}

	*new_spte = spte;
	return wrprot;
}

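/*
 * Clear the NX bit and set the X bit on an SPTE, preserving access-tracking
 * state across the change.  Used when splitting a huge page, as the NX huge
 * page mitigation only applies to huge mappings.
 */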
static u64 make_spte_executable(u64 spte)
{
	bool is_access_track = is_access_track_spte(spte);

	if (is_access_track)
		spte = restore_acc_track_spte(spte);

	spte &= ~shadow_nx_mask;
	spte |= shadow_x_mask;

	if (is_access_track)
		spte = mark_spte_for_access_track(spte);

	return spte;
}

/*
 * Construct an SPTE that maps a sub-page of the given huge page SPTE where
 * `index` identifies which sub-page.
 *
 * This is used during huge page splitting to build the SPTEs that make up
 * the new page table.
 */
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role,
			      int index)
{
	u64 child_spte;

	if (WARN_ON_ONCE(!is_shadow_present_pte(huge_spte)))
		return 0;

	if (WARN_ON_ONCE(!is_large_pte(huge_spte)))
		return 0;

	child_spte = huge_spte;

	/*
	 * The child_spte already has the base address of the huge page being
	 * split.  So we just have to OR in the offset to the page at the next
	 * lower level for the given index.
	 */
	child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;

	if (role.level == PG_LEVEL_4K) {
		child_spte &= ~PT_PAGE_SIZE_MASK;

		/*
		 * When splitting to a 4K page where execution is allowed, mark
		 * the page executable as the NX hugepage mitigation no longer
		 * applies.
		 */
		if ((role.access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(kvm))
			child_spte = make_spte_executable(child_spte);
	}

	return child_spte;
}
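/*
 * Construct a non-leaf SPTE that points at the page table @child_pt.
 * Intermediate SPTEs are always present and maximally permissive (RWX,
 * user); permissions are enforced by the leaf SPTEs.
 */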
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;

	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
		shadow_user_mask | shadow_x_mask | shadow_me_value;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

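/*
 * Compute the replacement SPTE when the mmu_notifier reports that a host
 * PTE changed: point the SPTE at @new_pfn, strip all writable bits, and
 * mark it for access tracking so that the next guest access faults and is
 * revalidated.
 */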
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~SPTE_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~shadow_host_writable_mask;
	new_spte &= ~shadow_mmu_writable_mask;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}

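/*
 * Make an SPTE access-tracked so the next guest access faults.  With A/D
 * bits this only requires clearing the Accessed bit.  Without A/D bits,
 * the protection bits covered by shadow_acc_track_mask are stashed in the
 * SPTE's otherwise-unused high bits and then cleared, making the SPTE
 * non-accessible; restore_acc_track_spte() undoes the transformation.
 */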
u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	check_spte_writable_invariants(spte);

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}

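/*
 * Set the vendor-provided value and mask used to identify MMIO SPTEs.  Any
 * inconsistency between the two, any overlap with bits KVM reserves for
 * other uses, or a collision with REMOVED_SPTE forces mmio_value to 0,
 * which disables MMIO caching entirely.
 */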
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	/*
	 * Reset to the original module param value to honor userspace's
	 * desire to (dis)allow MMIO caching.  Update the param itself so that
	 * userspace can see whether or not KVM is actually using MMIO caching.
	 */
	enable_mmio_caching = allow_mmio_caching;
	if (!enable_mmio_caching)
		mmio_value = 0;

	/*
	 * The mask must contain only bits that are carved out specifically for
	 * the MMIO SPTE mask, e.g. so that the MMIO value/mask can't clobber
	 * the memslots generation or the GFN stuffed into MMIO SPTEs.
	 */
	if (WARN_ON(mmio_mask & ~SPTE_MMIO_ALLOWED_MASK))
		mmio_value = 0;

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	/*
	 * The masked MMIO value must obviously match itself, and a removed
	 * SPTE must not get a false positive: an MMIO SPTE must never be
	 * mistaken for a removed SPTE or vice versa, else zapping and MMIO
	 * caching would trip over each other.
	 */
	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;

	if (!mmio_value)
		enable_mmio_caching = false;

	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
	/* shadow_me_value must be a subset of shadow_me_mask */
	if (WARN_ON(me_value & ~me_mask))
		me_value = me_mask = 0;

	shadow_me_value = me_value;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);

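/*
 * Configure the SPTE masks for Intel EPT, where Accessed/Dirty bits and
 * exec-only mappings are optional hardware capabilities.
 */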
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
	shadow_user_mask = VMX_EPT_READABLE_MASK;
	shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
	shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
	shadow_nx_mask = 0ull;
	shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
	shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;

	/*
	 * EPT overrides the host MTRRs, and so KVM must program the desired
	 * memtype directly into the SPTEs.  Note, this mask is just the mask
	 * of all bits that factor into the memtype, the actual memtype must be
	 * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
	 */
	shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
	shadow_acc_track_mask = VMX_EPT_RWX_MASK;
	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

	/*
	 * EPT Misconfigurations are generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

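/*
 * Reset all SPTE masks to the defaults for shadow paging and NPT, i.e. for
 * the legacy 64-bit PTE format.  Runs when a vendor module is loaded, before
 * any vendor-specific overrides such as kvm_mmu_set_ept_masks().
 */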
void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;
	u64 mask;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or less physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID, so use the CPU's actual L1 cache addressing
	 * width (x86_cache_bits) when picking which high bits to set in
	 * non-present SPTEs, so that the shifted GFN can never produce an
	 * address that hits in the L1 data cache.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
	shadow_user_mask = PT_USER_MASK;
	shadow_accessed_mask = PT_ACCESSED_MASK;
	shadow_dirty_mask = PT_DIRTY_MASK;
	shadow_nx_mask = PT64_NX_MASK;
	shadow_x_mask = 0;
	shadow_present_mask = PT_PRESENT_MASK;

	/*
	 * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
	 * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
	 * correct memtype (WB is the "weakest" memtype).
	 */
	shadow_memtype_mask = 0;
	shadow_acc_track_mask = 0;
	shadow_me_mask = 0;
	shadow_me_value = 0;

	shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITABLE;

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (shadow_phys_bits < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}