// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

/*
 * Arch-specific hook: the number of extra dirty-log entries per vCPU
 * that hardware dirty logging can produce (x86 overrides this for PML);
 * the weak default is none.
 */
int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}

u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}

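/*
 * The reserved entries keep a margin between the soft limit and the
 * hard ring size: a vCPU that turns soft-full can still push the
 * entries that hardware dirty logging (x86's PML, for example) may
 * flush before the vCPU exits to userspace with
 * KVM_EXIT_DIRTY_RING_FULL.
 */
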
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	/* Both indices are free-running; unsigned subtraction handles wraparound. */
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}

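/*
 * Worked example of the index arithmetic (illustrative): the indices
 * are only masked when addressing the array, so with dirty_index ==
 * 0x00000002 (after a u32 wrap) and reset_index == 0xfffffffe,
 * 0x00000002 - 0xfffffffe == 4 entries in use; unsigned modular
 * subtraction stays correct across the 32-bit wraparound.
 */
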
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	/* The slot field encodes the address space in its high 16 bits. */
	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	/* The highest bit of the mask must still fall inside the memslot. */
	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}

int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	/* "size" is in bytes; ring->size counts entries and must be a power of two. */
	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}

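/*
 * Userspace maps the ring from the vcpu fd; a minimal sketch of that
 * side, assuming the documented dirty-ring ABI (illustrative code, not
 * part of this file):
 *
 *	struct kvm_dirty_gfn *gfns =
 *		mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		     vcpu_fd, KVM_DIRTY_LOG_PAGE_OFFSET * PAGE_SIZE);
 *
 * "size" is the same byte size passed when enabling
 * KVM_CAP_DIRTY_LOG_RING, which requires it to be a power of two.
 */
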
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = 0;
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return gfn->flags & KVM_DIRTY_GFN_F_RESET;
}

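/*
 * Together these helpers implement the entry life cycle shared with
 * userspace: an entry starts invalid (flags == 0), KVM publishes it as
 * dirty (KVM_DIRTY_GFN_F_DIRTY), userspace marks it harvested
 * (KVM_DIRTY_GFN_F_RESET), and kvm_dirty_ring_reset() below returns it
 * to invalid once dirty logging for the page has been re-armed.
 */
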
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;

		/*
		 * Try to coalesce the reset operations when the guest is
		 * scanning pages in the same memslot.
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			/* Forward visit that still fits in the current mask? */
			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows! */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	trace_kvm_dirty_ring_reset(ring);

	return count;
}

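/*
 * Coalescing example (illustrative): if userspace harvested offsets 5,
 * 6 and 8 of the same slot in that order, the loop above folds them
 * into a single kvm_reset_dirty_gfn() call with cur_offset == 5 and
 * mask == 0b1011, instead of taking the MMU lock three times.
 */
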
void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
{
	struct kvm_dirty_gfn *entry;

	/* It should never get full */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program.  There's also a memory barrier on the
	 * reader side when it consumes the entry.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);
}

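/*
 * The reader side of that barrier, as a sketch of a userspace harvest
 * loop (illustrative only; collect() and next_gfn() are hypothetical
 * helpers, while the flags and ioctl are the documented ABI):
 *
 *	while (READ_ONCE(gfn->flags) & KVM_DIRTY_GFN_F_DIRTY) {
 *		rmb();				// pairs with smp_wmb() above
 *		collect(gfn->slot, gfn->offset);
 *		gfn->flags = KVM_DIRTY_GFN_F_RESET;	// mark harvested
 *		gfn = next_gfn(ring);
 *	}
 *	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
 */
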
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

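/*
 * kvm_dirty_ring_get_page() backs the vcpu mmap fault path: the ring
 * comes from vzalloc(), so its pages are not physically contiguous and
 * each one is resolved with vmalloc_to_page() as userspace faults it in.
 */
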
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}