// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support KVM guest page tracking
 *
 * This feature allows us to track page access in guest. Currently, only
 * write access is tracked.
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/kvm_host.h>
#include <linux/rculist.h>

#include <asm/kvm_page_track.h>

#include "mmu.h"
#include "mmu_internal.h"

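/*
 * Write tracking is needed whenever shadow paging is in use (!tdp_enabled).
 * With TDP it is enabled only on demand: when an external user requires it
 * (CONFIG_KVM_EXTERNAL_WRITE_TRACKING) or once shadow MMU roots have been
 * allocated for this VM.
 */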
bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
{
	return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) ||
	       !tdp_enabled || kvm_shadow_root_allocated(kvm);
}

void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		kvfree(slot->arch.gfn_track[i]);
		slot->arch.gfn_track[i] = NULL;
	}
}

int kvm_page_track_create_memslot(struct kvm *kvm,
				  struct kvm_memory_slot *slot,
				  unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		if (i == KVM_PAGE_TRACK_WRITE &&
		    !kvm_page_track_write_tracking_enabled(kvm))
			continue;

		slot->arch.gfn_track[i] =
			__vcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
				  GFP_KERNEL_ACCOUNT);
		if (!slot->arch.gfn_track[i])
			goto track_free;
	}

	return 0;

track_free:
	kvm_page_track_free_memslot(slot);
	return -ENOMEM;
}

static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
{
	if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
		return false;

	return true;
}

int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot)
{
	unsigned short *gfn_track;

	if (slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE])
		return 0;

	gfn_track = __vcalloc(slot->npages, sizeof(*gfn_track),
			      GFP_KERNEL_ACCOUNT);
	if (gfn_track == NULL)
		return -ENOMEM;

	slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE] = gfn_track;
	return 0;
}

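/*
 * Adjust the per-gfn tracking count for @mode in @slot by @count. A non-zero
 * count means at least one tracker wants accesses to that gfn intercepted.
 */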
static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
			     enum kvm_page_track_mode mode, short count)
{
	int index, val;

	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);

	val = slot->arch.gfn_track[mode][index];

	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
		return;

	slot->arch.gfn_track[mode][index] += count;
}

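/*
 * add guest page to the tracking pool so that corresponding access on that
 * page will be intercepted.
 *
 * It should be called under the protection both of mmu-lock and kvm->srcu
 * or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */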
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn,
				  enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
		    !kvm_page_track_write_tracking_enabled(kvm)))
		return;

	update_gfn_track(slot, gfn, mode, 1);

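	/*
	 * new track stops large page mapping for the
	 * tracked page.
	 */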
	kvm_mmu_gfn_disallow_lpage(slot, gfn);

	if (mode == KVM_PAGE_TRACK_WRITE)
		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
			kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);

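/*
 * remove the guest page from the tracking pool which stops the interception
 * of corresponding access on that page. It is the opposite operation of
 * kvm_slot_page_track_add_page().
 *
 * It should be called under the protection both of mmu-lock and kvm->srcu
 * or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */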
void kvm_slot_page_track_remove_page(struct kvm *kvm,
				     struct kvm_memory_slot *slot, gfn_t gfn,
				     enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
		    !kvm_page_track_write_tracking_enabled(kvm)))
		return;

	update_gfn_track(slot, gfn, mode, -1);

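	/*
	 * allow large page mapping for the tracked page
	 * after the tracker is gone.
	 */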
	kvm_mmu_gfn_allow_lpage(slot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);

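/*
 * check if the corresponding access on the specified guest page is tracked.
 */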
bool kvm_slot_page_track_is_active(struct kvm *kvm,
				   const struct kvm_memory_slot *slot,
				   gfn_t gfn, enum kvm_page_track_mode mode)
{
	int index;

	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return false;

	if (!slot)
		return false;

	if (mode == KVM_PAGE_TRACK_WRITE &&
	    !kvm_page_track_write_tracking_enabled(kvm))
		return false;

	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}

void kvm_page_track_cleanup(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	cleanup_srcu_struct(&head->track_srcu);
}

int kvm_page_track_init(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	INIT_HLIST_HEAD(&head->track_notifier_list);
	return init_srcu_struct(&head->track_srcu);
}

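/*
 * register the notifier so that event interception for the tracked guest
 * pages can be received.
 */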
void
kvm_page_track_register_notifier(struct kvm *kvm,
				 struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	write_lock(&kvm->mmu_lock);
	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
	write_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);

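/*
 * stop receiving the event interception. It is the opposite operation of
 * kvm_page_track_register_notifier().
 */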
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
				   struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	write_lock(&kvm->mmu_lock);
	hlist_del_rcu(&n->node);
	write_unlock(&kvm->mmu_lock);
	synchronize_srcu(&head->track_srcu);
}
EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);

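/*
 * Notify the nodes that write access is intercepted and write emulation is
 * finished at this time.
 *
 * Each node should figure out by itself whether the written page is one it
 * is interested in.
 */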
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			  int bytes)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &vcpu->kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
				  srcu_read_lock_held(&head->track_srcu))
		if (n->track_write)
			n->track_write(vcpu, gpa, new, bytes, n);
	srcu_read_unlock(&head->track_srcu, idx);
}

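/*
 * Notify the nodes that the memory slot is being removed or moved so that
 * they can drop write-protection for the pages in the memory slot.
 *
 * Each node should figure out by itself whether it has any write-protected
 * pages in this slot.
 */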
void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
				  srcu_read_lock_held(&head->track_srcu))
		if (n->track_flush_slot)
			n->track_flush_slot(kvm, slot, n);
	srcu_read_unlock(&head->track_srcu, idx);
}