/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H

#include "mmu_internal.h"

/*
 * A MMU-present SPTE is backed by actual memory and may or may not be present
 * in hardware.  E.g. MMIO SPTEs are not considered MMU-present.  Bit 11 is
 * used because it is ignored by all flavors of SPTEs.
 */
#define SPTE_MMU_PRESENT_MASK		BIT_ULL(11)

/*
 * TDP SPTEs (more specifically, EPT SPTEs) may not have A/D bits, and may
 * instead be tracked/managed by software.  Bits 52 and 53 record the A/D
 * type of the SPTE: A/D bits enabled, A/D bits disabled, or A/D bits
 * disabled with writes additionally write-protected so that dirty state can
 * still be tracked.  The "enabled" encoding is intentionally '0' so that
 * SPTEs with A/D bits enabled, the common case, carry no extra bits.
 */
#define SPTE_TDP_AD_SHIFT		52
#define SPTE_TDP_AD_MASK		(3ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_ENABLED_MASK	(0ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_DISABLED_MASK	(1ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_WRPROT_ONLY_MASK	(2ULL << SPTE_TDP_AD_SHIFT)
static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
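/*
 * For illustration: an SPTE with bits 53:52 == 1 (SPTE_TDP_AD_DISABLED_MASK)
 * fails the spte_ad_enabled() check below, while 0 (enabled) and 2
 * (wrprot-only) pass it; spte_ad_need_write_protect() is true for any
 * non-zero A/D type.
 */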

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define SPTE_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define SPTE_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif

#define SPTE_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK	1
#define ACC_WRITE_MASK	PT_WRITABLE_MASK
#define ACC_USER_MASK	PT_USER_MASK
#define ACC_ALL		(ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define SPTE_EPT_READABLE_MASK		0x1ull
#define SPTE_EPT_EXECUTABLE_MASK	0x4ull

#define SPTE_LEVEL_BITS			9
#define SPTE_LEVEL_SHIFT(level)		__PT_LEVEL_SHIFT(level, SPTE_LEVEL_BITS)
#define SPTE_INDEX(address, level)	__PT_INDEX(address, level, SPTE_LEVEL_BITS)
#define SPTE_ENT_PER_PAGE		__PT_ENT_PER_PAGE(SPTE_LEVEL_BITS)

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes.  The W bit is not saved, as
 * access-tracked PTEs must also be dirty tracked, so the W bit is restored
 * only when a write is attempted to the page.
 */
#define SHADOW_ACC_TRACK_SAVED_BITS_MASK	(SPTE_EPT_READABLE_MASK | \
						 SPTE_EPT_EXECUTABLE_MASK)
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT	54
#define SHADOW_ACC_TRACK_SAVED_MASK	(SHADOW_ACC_TRACK_SAVED_BITS_MASK << \
					 SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));
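/*
 * For illustration: an access-tracked EPT SPTE that had R and X set has
 * (0x1 | 0x4) saved at bits 54 and 56, i.e. spte |= 0x5ull << 54, with the
 * hardware R/X bits cleared; restore_acc_track_spte() below shifts the
 * saved bits back down into place.
 */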

/*
 * {DEFAULT,EPT}_SPTE_{HOST,MMU}_WRITABLE are used to keep track of why a given
 * SPTE is write-protected.  See is_writable_pte() for details.
 */

/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
#define DEFAULT_SPTE_HOST_WRITABLE	BIT_ULL(9)
#define DEFAULT_SPTE_MMU_WRITABLE	BIT_ULL(10)

/*
 * Low ignored bits are at a premium for EPT, use high ignored bits, taking
 * care to not overlap the A/D type mask or the saved access bits of
 * access-tracked SPTEs when A/D bits are disabled.
 */
#define EPT_SPTE_HOST_WRITABLE		BIT_ULL(57)
#define EPT_SPTE_MMU_WRITABLE		BIT_ULL(58)

static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));

/* Defined only to keep the above static asserts readable. */
#undef SHADOW_ACC_TRACK_SAVED_MASK

/*
 * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10
 * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included
 * in the MMIO generation number, as doing so would require stealing a bit
 * from the "real" generation number and thus effectively halve the maximum
 * number of MMIO generations that can be handled before encountering a wrap
 * (which requires a full MMU zap).  The flag is instead explicitly queried
 * when checking for MMIO spte cache hits.
 */

#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		10

#define MMIO_SPTE_GEN_HIGH_START	52
#define MMIO_SPTE_GEN_HIGH_END		62

#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)
static_assert(!(SPTE_MMU_PRESENT_MASK &
		(MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));

/*
 * The SPTE MMIO mask must NOT overlap the MMIO generation bits or the
 * MMU-present bit.  The generation obviously co-exists with the magic MMIO
 * mask/value, and MMIO SPTEs are considered !MMU-present.
 *
 * The mask below allows the hardware "present" bits (the EPT RWX bits, 2:0),
 * the physical address bits (51:12), and the topmost software-available bit
 * (63) to be used for the magic MMIO mask/value.
 */
#define SPTE_MMIO_ALLOWED_MASK	(BIT_ULL(63) | GENMASK_ULL(51, 12) | GENMASK_ULL(2, 0))
static_assert(!(SPTE_MMIO_ALLOWED_MASK &
		(SPTE_MMU_PRESENT_MASK | MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));

#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_BITS		(MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)

/* remember to adjust the comment above as well if you change these */
static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);

#define MMIO_SPTE_GEN_LOW_SHIFT		(MMIO_SPTE_GEN_LOW_START - 0)
#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
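/*
 * Worked example: for generation gen = 0x5ff, the low byte (0xff) lands in
 * SPTE bits 3-10 and the remaining high bits (0x5) land in bits 52-62, i.e.
 * the SPTE carries (0xffull << MMIO_SPTE_GEN_LOW_SHIFT) | (0x5ull << 52).
 * get_mmio_spte_generation() below reverses the split: bits 3-10 shift down
 * by 3 to recover 0xff, and bits 52-62 shift down by MMIO_SPTE_GEN_HIGH_SHIFT
 * (52 - 8 = 44) to recover 0x500, giving back 0x5ff.
 */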

extern u64 __read_mostly shadow_host_writable_mask;
extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask;
extern u64 __read_mostly shadow_user_mask;
extern u64 __read_mostly shadow_accessed_mask;
extern u64 __read_mostly shadow_dirty_mask;
extern u64 __read_mostly shadow_mmio_value;
extern u64 __read_mostly shadow_mmio_mask;
extern u64 __read_mostly shadow_mmio_access_mask;
extern u64 __read_mostly shadow_present_mask;
extern u64 __read_mostly shadow_memtype_mask;
extern u64 __read_mostly shadow_me_value;
extern u64 __read_mostly shadow_me_mask;

/*
 * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
extern u64 __read_mostly shadow_acc_track_mask;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5

/*
 * If a thread running without exclusive control of the MMU lock must perform
 * a multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as
 * a non-present intermediate value.  Other threads that encounter this value
 * must not modify the SPTE.
 *
 * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present
 * on both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create
 * an L1TF vulnerability.  Use only low bits to avoid 64-bit immediates.
 *
 * Only used by the TDP MMU.
 */
#define REMOVED_SPTE	0x5a0ULL

/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));
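/*
 * Sanity of the value: 0x5a0 sets only bits 5, 7, 8 and 10, so the RWX bits
 * (2:0), the PFN bits (51:12) and SPTE_MMU_PRESENT_MASK (bit 11) are all
 * clear, and the constant fits in a 32-bit immediate.
 */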

static inline bool is_removed_spte(u64 spte)
{
	return spte == REMOVED_SPTE;
}

/* Get an SPTE's index into its parent's page table (and the spt array). */
static inline int spte_index(u64 *sptep)
{
	return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
}
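/*
 * E.g. for an sptep at byte offset 0x18 within its (page-aligned) page
 * table, sptep / 8 is a multiple of 512 plus 3, so masking with
 * SPTE_ENT_PER_PAGE - 1 (511) yields index 3.
 */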

/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

static inline bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
	       likely(enable_mmio_caching);
}

static inline bool is_shadow_present_pte(u64 pte)
{
	return !!(pte & SPTE_MMU_PRESENT_MASK);
}

/*
 * Returns true if A/D bits are supported in hardware and are enabled by KVM.
 * When enabled, KVM uses A/D bits for all non-nested MMUs.  Because L1 can
 * disable A/D bits in its EPTP, SP and SPTE variants are needed to handle
 * the scenario where KVM is using A/D bits for L1 while they are disabled
 * for L2.
 */
static inline bool kvm_ad_enabled(void)
{
	return !!shadow_accessed_mask;
}

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

static inline bool spte_ad_enabled(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED_MASK;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	/*
	 * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED_MASK is
	 * '0', and non-TDP SPTEs will never set these bits, so the A/D type
	 * can be checked unconditionally.
	 */
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED_MASK;
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

static inline bool is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static inline bool is_last_spte(u64 pte, int level)
{
	return (level == PG_LEVEL_4K) || is_large_pte(pte);
}

static inline bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

static inline kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & SPTE_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
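/*
 * E.g. a 4KiB SPTE mapping physical address 0x12345000 has bits 51:12 equal
 * to 0x12345, so spte_to_pfn() returns 0x12345 after masking off the low
 * attribute bits and any high software bits.
 */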

static inline bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static inline bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}

static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
				int level)
{
	int bit7 = (pte >> 7) & 1;

	return rsvd_check->rsvd_bits_mask[bit7][level-1];
}

static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
				      u64 pte, int level)
{
	return pte & get_rsvd_bits(rsvd_check, pte, level);
}

static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
				   u64 pte)
{
	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
}

static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
					 u64 spte, int level)
{
	return __is_bad_mt_xwr(rsvd_check, spte) ||
	       __is_rsvd_bits_set(rsvd_check, spte, level);
}

/*
 * A shadow-present leaf SPTE may be non-writable for four reasons:
 *
 *  1. To intercept writes for dirty logging.  KVM write-protects pages so
 *     that writes can be recorded in the dirty log, and huge pages so that
 *     they can be split down to the dirty logging granularity (4KiB).
 *
 *  2. To intercept writes to guest page tables that KVM is shadowing, so
 *     that the writes can be emulated and the shadow pages kept in sync.
 *
 *  3. To prevent guest writes to read-only memory, e.g. a read-only memslot
 *     when emulating a ROM.
 *
 *  4. To emulate the Accessed bit for SPTEs without A/D bits; access-tracked
 *     SPTEs are made entirely non-present, which clears the W bit as well.
 *
 * To keep track of why a given SPTE is write-protected, KVM uses the two
 * software-only bits defined above:
 *
 *  shadow_mmu_writable_mask, aka MMU-writable -
 *    Cleared on SPTEs that KVM is currently write-protecting for shadow
 *    paging purposes (case 2 above).
 *
 *  shadow_host_writable_mask, aka Host-writable -
 *    Cleared on SPTEs that are not host-writable (case 3 above).
 *
 * An SPTE that is Host-writable and MMU-writable but merely missing the W
 * bit (cases 1 and 4) can be made writable again without consulting the
 * memslots or shadow pages, e.g. in the fast page fault path.  Hence the
 * invariants checked below: MMU-writable implies Host-writable, and a
 * writable SPTE implies MMU-writable.
 */
static inline bool is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

/* Note: spte must be a shadow-present leaf SPTE. */
static inline void check_spte_writable_invariants(u64 spte)
{
	if (spte & shadow_mmu_writable_mask)
		WARN_ONCE(!(spte & shadow_host_writable_mask),
			  "kvm: MMU-writable SPTE is not Host-writable: %llx",
			  spte);
	else
		WARN_ONCE(is_writable_pte(spte),
			  "kvm: Writable SPTE is not MMU-writable: %llx", spte);
}

static inline bool is_mmu_writable_spte(u64 spte)
{
	return spte & shadow_mmu_writable_mask;
}

static inline u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
	return gen;
}

bool spte_has_volatile_bits(u64 spte);

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte);
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte,
			      union kvm_mmu_page_role role, int index);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
u64 mark_spte_for_access_track(u64 spte);

/* Restore an acc-track PTE back to a regular PTE */
static inline u64 restore_acc_track_spte(u64 spte)
{
	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

	spte &= ~shadow_acc_track_mask;
	spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
	spte |= saved_bits;

	return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);

void __init kvm_mmu_spte_module_init(void);
void kvm_mmu_reset_all_pte_masks(void);

#endif