// SPDX-License-Identifier: GPL-2.0
/*
 * VMID allocator.
 *
 * Based on the arm64 ASID allocator algorithm.
 * Please refer to arch/arm64/mm/context.c for detailed
 * comments on the algorithm.
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

unsigned int kvm_arm_vmid_bits;
static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);

static atomic64_t vmid_generation;
static unsigned long *vmid_map;

static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);

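/*
 * Note on encoding: a 64-bit "vmid" value stores the allocator
 * generation in the bits above kvm_arm_vmid_bits and the hardware
 * VMID in the low kvm_arm_vmid_bits bits. For example, with 16-bit
 * VMIDs, VMID_FIRST_VERSION is 0x10000, vmid2idx() masks off the
 * generation to index vmid_map, and vmid_gen_match() checks that the
 * upper bits equal the current vmid_generation.
 */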
#define VMID_MASK       (~GENMASK(kvm_arm_vmid_bits - 1, 0))
#define VMID_FIRST_VERSION  (1UL << kvm_arm_vmid_bits)

#define NUM_USER_VMIDS      VMID_FIRST_VERSION
#define vmid2idx(vmid)      ((vmid) & ~VMID_MASK)
#define idx2vmid(idx)       vmid2idx(idx)

/*
 * As VMID #0 is always reserved, it will never be allocated and can
 * be treated as invalid. This is used to set active_vmids on vCPU
 * schedule out.
 */
#define VMID_ACTIVE_INVALID     VMID_FIRST_VERSION

#define vmid_gen_match(vmid) \
    (!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))

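/*
 * Reset the allocator state on generation rollover. Called from
 * new_vmid() with cpu_vmid_lock held: clear the allocation bitmap,
 * preserve the VMIDs currently live on each CPU as reserved ones,
 * and invalidate stale TLB and I-cache entries in hyp.
 */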
static void flush_context(void)
{
    int cpu;
    u64 vmid;

    bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);

    for_each_possible_cpu(cpu) {
        vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

        /* Preserve reserved VMID */
        if (vmid == 0)
            vmid = per_cpu(reserved_vmids, cpu);
        __set_bit(vmid2idx(vmid), vmid_map);
        per_cpu(reserved_vmids, cpu) = vmid;
    }

    /*
     * Unlike the ASID allocator, we expect rollover to be less
     * frequent for VMIDs. Hence, instead of marking the CPU as
     * flush_pending and issuing a local context invalidation on
     * the next context switch, we broadcast a TLB flush + I-cache
     * invalidation over the inner shareable domain on rollover.
     */
    kvm_call_hyp(__kvm_flush_vm_context);
}

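/*
 * On rollover, a guest may still own a VMID that flush_context()
 * preserved as reserved. If so, carry it over to the new generation
 * rather than allocating a fresh one.
 */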
static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
{
    int cpu;
    bool hit = false;

    /*
     * Iterate over the set of reserved VMIDs looking for a match
     * and update to use newvmid (i.e. the same VMID in the current
     * generation).
     */
    for_each_possible_cpu(cpu) {
        if (per_cpu(reserved_vmids, cpu) == vmid) {
            hit = true;
            per_cpu(reserved_vmids, cpu) = newvmid;
        }
    }

    return hit;
}

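/*
 * Slow-path allocation, called with cpu_vmid_lock held. Try to
 * re-use the guest's old VMID in the new generation (either via the
 * reserved set or by re-claiming its bit in vmid_map); otherwise
 * scan the bitmap for a free VMID, rolling the generation over and
 * flushing the context if the map is exhausted.
 */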
static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
    static u32 cur_idx = 1;
    u64 vmid = atomic64_read(&kvm_vmid->id);
    u64 generation = atomic64_read(&vmid_generation);

    if (vmid != 0) {
        u64 newvmid = generation | (vmid & ~VMID_MASK);

        if (check_update_reserved_vmid(vmid, newvmid)) {
            atomic64_set(&kvm_vmid->id, newvmid);
            return newvmid;
        }

        if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
            atomic64_set(&kvm_vmid->id, newvmid);
            return newvmid;
        }
    }

    vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
    if (vmid != NUM_USER_VMIDS)
        goto set_vmid;

    /* We're out of VMIDs, so increment the global generation count */
    generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
                         &vmid_generation);
    flush_context();

    /* We have more VMIDs than CPUs, so this will always succeed */
    vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
    __set_bit(vmid, vmid_map);
    cur_idx = vmid;
    vmid = idx2vmid(vmid) | generation;
    atomic64_set(&kvm_vmid->id, vmid);
    return vmid;
}

/* Called from vCPU sched out with preemption disabled */
void kvm_arm_vmid_clear_active(void)
{
    atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}

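/*
 * Called on the guest-entry path with preemption disabled: fast-path
 * the common case where the VMID is still valid for the current
 * generation, otherwise fall back to new_vmid() under cpu_vmid_lock.
 */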
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
    unsigned long flags;
    u64 vmid, old_active_vmid;

    vmid = atomic64_read(&kvm_vmid->id);

    /*
     * Please refer to the comments in check_and_switch_context() in
     * arch/arm64/mm/context.c.
     *
     * Unlike the ASID allocator, we set active_vmids to
     * VMID_ACTIVE_INVALID on vCPU schedule out to avoid
     * reserving the VMID space needlessly on rollover.
     * Hence we explicitly check here for "!= 0" to
     * handle the sync with a concurrent rollover.
     */
    old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
    if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
        0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
                      old_active_vmid, vmid))
        return;

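    /*
     * Slow path: under cpu_vmid_lock, re-read kvm_vmid->id since
     * another vCPU of the same VM may have refreshed it already, and
     * allocate a new VMID only if it is still from a stale generation.
     */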
    raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

    /* Check that our VMID belongs to the current generation. */
    vmid = atomic64_read(&kvm_vmid->id);
    if (!vmid_gen_match(vmid))
        vmid = new_vmid(kvm_vmid);

    atomic64_set(this_cpu_ptr(&active_vmids), vmid);
    raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
}

/*
 * Initialize the VMID allocator
 */
int kvm_arm_vmid_alloc_init(void)
{
    kvm_arm_vmid_bits = kvm_get_vmid_bits();

    /*
     * Expect allocation after rollover to fail if we don't have
     * at least one more VMID than CPUs. VMID #0 is always reserved.
     */
    WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
    atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
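    /*
     * The bitmap needs one bit per VMID; e.g. with 16-bit VMIDs,
     * NUM_USER_VMIDS is 65536 and vmid_map occupies 8 KiB.
     */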
    vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
               sizeof(*vmid_map), GFP_KERNEL);
    if (!vmid_map)
        return -ENOMEM;

    return 0;
}

void kvm_arm_vmid_alloc_free(void)
{
    kfree(vmid_map);
}