![]() |
|
|||
// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_ITER_H
#define __KVM_X86_MMU_TDP_ITER_H

#include <linux/kvm_host.h>

#include "mmu.h"
#include "spte.h"

/*
 * TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
 * to be zapped while holding mmu_lock for read, and to allow TLB flushes to be
 * batched without having to collect the list of zapped SPs.  Flows that can
 * remove SPs must service pending TLB flushes prior to dropping RCU protection.
 */

/* Read the SPTE at @sptep under RCU protection, without tearing. */
static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
{
	return READ_ONCE(*rcu_dereference(sptep));
}

/*
 * Atomically replace the SPTE at @sptep with @new_spte and return the prior
 * value, which may differ from the caller's snapshot if hardware or another
 * task modified volatile bits concurrently.
 */
static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
	return xchg(rcu_dereference(sptep), new_spte);
}

/*
 * Non-atomically write @new_spte to @sptep.  Safe only when no other entity
 * can legally modify the SPTE, otherwise use the atomic variant above.
 */
static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
	WRITE_ONCE(*rcu_dereference(sptep), new_spte);
}

/*
 * Write @new_spte to @sptep, choosing an atomic or plain write based on
 * whether the current SPTE has bits that can change outside of mmu_lock.
 *
 * Returns the old SPTE value: the actual pre-exchange value when the write
 * is done atomically (it may differ from @old_spte if volatile bits changed
 * under the caller's feet), or @old_spte itself for the plain-write path.
 */
static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
					 u64 new_spte, int level)
{
	/*
	 * Atomically write the SPTE if it is a shadow-present, leaf SPTE with
	 * volatile bits, i.e. has bits that can be set outside of mmu_lock.
	 * The Writable bit can be set by KVM's fast page fault handler, and
	 * Accessed and Dirty bits can be set by the CPU.
	 *
	 * Note, non-leaf SPTEs do have Accessed bits and those bits are
	 * technically volatile, but KVM doesn't consume the Accessed bit of
	 * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit.  This
	 * logic needs to be reassessed if KVM were to use non-leaf Accessed
	 * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
	 */
	if (is_shadow_present_pte(old_spte) && is_last_spte(old_spte, level) &&
	    spte_has_volatile_bits(old_spte))
		return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);

	__kvm_tdp_mmu_write_spte(sptep, new_spte);
	return old_spte;
}

/*
 * A TDP iterator performs a pre-order walk over a TDP paging structure.
 */
struct tdp_iter {
	/*
	 * The iterator will traverse the paging structure towards the mapping
	 * for this GFN.
	 */
	gfn_t next_last_level_gfn;
	/*
	 * The next_last_level_gfn at the time when the thread last
	 * yielded. Only yielding when the next_last_level_gfn !=
	 * yielded_gfn helps ensure forward progress.
	 */
	gfn_t yielded_gfn;
	/* Pointers to the page tables traversed to reach the current SPTE */
	tdp_ptep_t pt_path[PT64_ROOT_MAX_LEVEL];
	/* A pointer to the current SPTE */
	tdp_ptep_t sptep;
	/* The lowest GFN mapped by the current SPTE */
	gfn_t gfn;
	/* The level of the root page given to the iterator */
	int root_level;
	/* The lowest level the iterator should traverse to */
	int min_level;
	/* The iterator's current level within the paging structure */
	int level;
	/* The address space ID, i.e. SMM vs. regular. */
	int as_id;
	/* A snapshot of the value at sptep */
	u64 old_spte;
	/*
	 * Whether the iterator has a valid state. This will be false if the
	 * iterator walks off the end of the paging structure.
	 */
	bool valid;
	/*
	 * True if KVM dropped mmu_lock and yielded in the middle of a walk, in
	 * which case tdp_iter_next() needs to restart the walk at the root
	 * level instead of advancing to the next entry.
	 */
	bool yielded;
};

/*
 * Iterates over every SPTE mapping the GFN range [start, end) in a
 * preorder traversal, descending no lower than @min_level.  The loop body
 * runs with @iter positioned on the current SPTE; break/continue behave as
 * in a normal for loop since this expands to one.
 */
#define for_each_tdp_pte_min_level(iter, root, min_level, start, end) \
	for (tdp_iter_start(&iter, root, min_level, start); \
	     iter.valid && iter.gfn < end; \
	     tdp_iter_next(&iter))

/* As above, but walks all the way down to 4KiB (last-level) SPTEs. */
#define for_each_tdp_pte(iter, root, start, end) \
	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)

/* Translate a non-leaf SPTE into a pointer to its child page table. */
tdp_ptep_t spte_to_child_pt(u64 pte, int level);

/* Initialize @iter for a walk of @root toward @next_last_level_gfn. */
void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
		    int min_level, gfn_t next_last_level_gfn);
/* Advance to the next SPTE in pre-order (or restart after a yield). */
void tdp_iter_next(struct tdp_iter *iter);
/* Restart the walk from the root at the iterator's current target GFN. */
void tdp_iter_restart(struct tdp_iter *iter);

#endif /* __KVM_X86_MMU_TDP_ITER_H */
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.1.0 LXR engine. The LXR team |
![]() ![]() |