// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

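/* The hypervisor's own stage-1 page-table; pkvm_pgd_lock serializes updates to it. */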
struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

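/* Snapshot of the host's memory regions, handed to the hypervisor at boot. */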
struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

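/* Bottom of the private VA range; allocations grow upwards from here, capped at __hyp_vmemmap. */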
static u64 __io_map_base;

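/* Map @size bytes at @phys to @start in the hypervisor stage-1, under pkvm_pgd_lock. */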
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long base, addr;
	int ret = 0;

	hyp_spin_lock(&pkvm_pgd_lock);

	/* Align the allocation based on the order of its size */
	addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));

	/* The allocated size is always a multiple of PAGE_SIZE */
	base = addr + PAGE_ALIGN(size);

	/* Are we overflowing on the vmemmap ? */
	if (!addr || base > __hyp_vmemmap)
		ret = -ENOMEM;
	else {
		__io_map_base = base;
		*haddr = addr;
	}

	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

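/*
 * Allocate a private VA range large enough for @size bytes and map @phys
 * there. On success, *haddr holds the hypervisor VA corresponding to @phys,
 * including the offset of @phys within its page.
 */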
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
				  enum kvm_pgtable_prot prot,
				  unsigned long *haddr)
{
	unsigned long addr;
	int err;

	size = PAGE_ALIGN(size + offset_in_page(phys));
	err = pkvm_alloc_private_va_range(size, &addr);
	if (err)
		return err;

	err = __pkvm_create_mappings(addr, size, phys, prot);
	if (err)
		return err;

	*haddr = addr + offset_in_page(phys);
	return err;
}

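/*
 * Map a range of hypervisor linear-map addresses [from, to) page by page;
 * each physical address is derived from the linear-map VA. The caller must
 * hold pkvm_pgd_lock.
 */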
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

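/* As pkvm_create_mappings_locked(), but taking pkvm_pgd_lock itself. */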
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

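/* Back the vmemmap region covering [phys, phys + size) with the pages at @back. */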
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
	unsigned long start, end;

	hyp_vmemmap_range(phys, size, &start, &end);

	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
}

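/*
 * Point this CPU's exception vectors at the slot matching the requested
 * Spectre mitigation; the indirect slots live in the private mapping set up
 * by hyp_map_vectors().
 */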
static void *__hyp_bp_vect_base;
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

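/*
 * Make the Spectre vector pages usable at EL2: systems that need idmapped
 * vectors get an additional private mapping of __bp_harden_hyp_vecs for the
 * indirect slots, others use the hyp image mapping directly.
 */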
int hyp_map_vectors(void)
{
	phys_addr_t phys;
	unsigned long bp_base;
	int ret;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
					    PAGE_HYP_EXEC, &bp_base);
	if (ret)
		return ret;

	__hyp_bp_vect_base = (void *)bp_base;

	return 0;
}

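/* Create the EL2 identity mapping of the idmap text, and set up the VA layout around it. */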
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half contains
	 * the idmap page, and needs some care. Take bit (hyp_va_bits - 2) of
	 * the idmap address, invert it, and use it as the base of the private
	 * VA range: private mappings then grow up from __io_map_base without
	 * ever clashing with the idmap, and the vmemmap occupies the upper
	 * half of that range, starting at __hyp_vmemmap.
	 */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}