0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/bitfield.h>
0014 #include <linux/bitops.h>
0015
0016 #include <asm/kvm_asm.h>
0017 #include <asm/kvm_mmu.h>
0018
/* Width in bits of the hardware VMID field (read once at init). */
unsigned int kvm_arm_vmid_bits;
/* Serializes the slow allocation path and rollover. */
static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);

/* Current generation, kept in the bits above kvm_arm_vmid_bits. */
static atomic64_t vmid_generation;
/* Bitmap of in-use VMIDs for the current generation. */
static unsigned long *vmid_map;

/* VMID currently programmed on each CPU (0 / VMID_ACTIVE_INVALID when none). */
static DEFINE_PER_CPU(atomic64_t, active_vmids);
/* VMID each CPU carried across the last rollover (see flush_context()). */
static DEFINE_PER_CPU(u64, reserved_vmids);
0027
/*
 * A 64-bit "vmid" value packs the generation in the upper bits and the
 * hardware VMID in the low kvm_arm_vmid_bits.
 */
#define VMID_MASK (~GENMASK(kvm_arm_vmid_bits - 1, 0))
#define VMID_FIRST_VERSION (1UL << kvm_arm_vmid_bits)

#define NUM_USER_VMIDS VMID_FIRST_VERSION
/* Strip the generation, leaving the bitmap index (== hardware VMID). */
#define vmid2idx(vmid) ((vmid) & ~VMID_MASK)
#define idx2vmid(idx) vmid2idx(idx)

/*
 * VMID #0 is always reserved (never allocated by new_vmid(), which
 * searches from index 1), so this value can never match a live VMID.
 * It is written to active_vmids by kvm_arm_vmid_clear_active() and is
 * deliberately non-zero so it is distinguishable from the "rollover in
 * progress" 0 written by flush_context().
 */
#define VMID_ACTIVE_INVALID VMID_FIRST_VERSION

/* True iff @vmid's generation bits equal the current global generation. */
#define vmid_gen_match(vmid) \
	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
0044
/*
 * Rollover: rebuild vmid_map so it contains only the VMIDs still live
 * on some CPU, then invalidate all guest TLB/context state.
 *
 * Called from new_vmid() with cpu_vmid_lock held.
 */
static void flush_context(void)
{
	int cpu;
	u64 vmid;

	bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);

	for_each_possible_cpu(cpu) {
		/*
		 * Atomically grab and clear this CPU's active VMID; the 0
		 * left behind tells the fast path in kvm_arm_vmid_update()
		 * that a rollover is in progress.
		 */
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

		/*
		 * If the CPU had no active VMID, keep the one it reserved
		 * on a previous rollover instead.
		 */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);
		__set_bit(vmid2idx(vmid), vmid_map);
		per_cpu(reserved_vmids, cpu) = vmid;
	}

	/*
	 * Broadcast invalidation of all VM contexts so stale translations
	 * tagged with recycled VMIDs cannot survive the rollover.
	 */
	kvm_call_hyp(__kvm_flush_vm_context);
}
0071
0072 static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
0073 {
0074 int cpu;
0075 bool hit = false;
0076
0077
0078
0079
0080
0081
0082 for_each_possible_cpu(cpu) {
0083 if (per_cpu(reserved_vmids, cpu) == vmid) {
0084 hit = true;
0085 per_cpu(reserved_vmids, cpu) = newvmid;
0086 }
0087 }
0088
0089 return hit;
0090 }
0091
/*
 * Allocate a VMID (in the current generation) for @kvm_vmid and store it
 * in kvm_vmid->id. Prefers re-using the VM's previous hardware VMID;
 * otherwise searches the bitmap, bumping the generation and performing a
 * rollover (flush_context()) if the map is exhausted.
 *
 * Called from kvm_arm_vmid_update() with cpu_vmid_lock held, which is
 * what makes the read-modify-write of the map and generation safe.
 */
static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
	/* Next search position; persists across calls (protected by the lock). */
	static u32 cur_idx = 1;
	u64 vmid = atomic64_read(&kvm_vmid->id);
	u64 generation = atomic64_read(&vmid_generation);

	if (vmid != 0) {
		/* Same hardware VMID, re-tagged with the current generation. */
		u64 newvmid = generation | (vmid & ~VMID_MASK);

		/*
		 * If some CPU carried this VMID across the last rollover,
		 * update its reserved copy and keep the VMID.
		 */
		if (check_update_reserved_vmid(vmid, newvmid)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}

		/*
		 * Otherwise try to re-claim the same slot in this
		 * generation's bitmap.
		 */
		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}
	}

	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
	if (vmid != NUM_USER_VMIDS)
		goto set_vmid;

	/* Map exhausted: move to the next generation and roll over. */
	generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
						 &vmid_generation);
	flush_context();

	/*
	 * Retry from index 1 (VMID #0 stays reserved). A free bit must
	 * exist provided NUM_USER_VMIDS exceeds the possible-CPU count,
	 * as sanity-checked in kvm_arm_vmid_alloc_init().
	 */
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
	__set_bit(vmid, vmid_map);
	cur_idx = vmid;
	vmid = idx2vmid(vmid) | generation;
	atomic64_set(&kvm_vmid->id, vmid);
	return vmid;
}
0131
0132
/*
 * Mark this CPU's active VMID invalid (non-zero sentinel), presumably on
 * vCPU schedule-out, so a later rollover's flush_context() does not
 * needlessly reserve whatever VMID last ran here.
 */
void kvm_arm_vmid_clear_active(void)
{
	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}
0137
/*
 * Ensure @kvm_vmid holds a VMID of the current generation and publish it
 * as this CPU's active VMID. Fast path is lockless; the slow path takes
 * cpu_vmid_lock and may allocate via new_vmid().
 */
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
	unsigned long flags;
	u64 vmid, old_active_vmid;

	vmid = atomic64_read(&kvm_vmid->id);

	/*
	 * Lockless fast path: if the VMID is from the current generation
	 * AND this CPU's active_vmids is non-zero (i.e. no rollover has
	 * zeroed it via flush_context()'s xchg), then cmpxchg-ing the
	 * active slot from its observed old value to @vmid both publishes
	 * the VMID and re-validates that no rollover raced with us — a
	 * concurrent flush_context() would have replaced the old value
	 * with 0, making the cmpxchg return 0 and forcing the slow path.
	 *
	 * The explicit "!= 0" check is needed because, unlike the ASID
	 * allocator this mirrors, active_vmids is set to the non-zero
	 * VMID_ACTIVE_INVALID on schedule-out rather than left alone.
	 */
	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
					  old_active_vmid, vmid))
		return;

	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

	/* Re-read under the lock; another path may have refreshed it. */
	vmid = atomic64_read(&kvm_vmid->id);
	if (!vmid_gen_match(vmid))
		vmid = new_vmid(kvm_vmid);

	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
}
0171
0172
0173
0174
0175 int kvm_arm_vmid_alloc_init(void)
0176 {
0177 kvm_arm_vmid_bits = kvm_get_vmid_bits();
0178
0179
0180
0181
0182
0183 WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
0184 atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
0185 vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
0186 sizeof(*vmid_map), GFP_KERNEL);
0187 if (!vmid_map)
0188 return -ENOMEM;
0189
0190 return 0;
0191 }
0192
0193 void kvm_arm_vmid_alloc_free(void)
0194 {
0195 kfree(vmid_map);
0196 }