#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

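/*
 * On non-VHE systems the hypervisor runs in its own VA space at EL2,
 * separate from the kernel's. A kernel VA is converted to a hyp VA by
 * masking the top bits of the address and inserting a tag; the exact
 * mask and tag values depend on the kernel VA layout, so they are
 * computed at boot by kvm_compute_layout() and patched into the code
 * below through the kvm_update_va_mask callback. With VHE, the kernel
 * and the hypervisor share the same mappings and the patching turns
 * the conversion into a no-op.
 */
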
#ifdef __ASSEMBLY__

#include <asm/alternative.h>

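/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The instructions below only reserve the space and fix the register
 * allocation: the actual code is generated at boot time by the
 * kvm_update_va_mask callback.
 */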
.macro kern_hyp_va reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1
	ror	\reg, \reg, #1
	add	\reg, \reg, #0
	add	\reg, \reg, #0, lsl 12
	ror	\reg, \reg, #63
alternative_cb_end
.endm

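/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */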
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

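/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */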
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset (patched in at boot by kvm_get_kimage_voffset). */
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimage VA. */
	add	\reg, \reg, \tmp
.endm

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_host.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

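/*
 * The IPA size is configurable per VM; the default size is 40 bits.
 * The value actually in use by a VM is encoded in its VTCR_EL2 value.
 */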
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(u32 *hyp_va_bits);

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (MMU) and SCTLR_EL1.C (D-cache) must be set. */
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, the guest always accesses memory with cacheable
	 * attributes, so there is no need to clean to the PoC when
	 * faulting in pages. FWB also implies IDC, so cleaning to the
	 * PoU is not required either.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* Any kind of VIPT cache: invalidate the lot. */
		icache_inval_all_pou();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT, or VPIPT at EL2 */
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

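/*
 * kvm_read_guest()/kvm_write_guest() require the memslots to be stable,
 * so the wrappers below take the kvm->srcu read lock around the access
 * for callers that do not already hold it.
 */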
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

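/*
 * Build the VTTBR_EL2 value for a stage-2 MMU: the stage-2 page table
 * base address, the current VMID, and the CnP bit when the CPU supports
 * Common-not-Private translations.
 */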
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

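/*
 * Load the stage-2 translation configuration (VTCR_EL2 and VTTBR_EL2)
 * for the given MMU; called from hyp context at EL2.
 */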
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * CPUs affected by ARM64_WORKAROUND_SPECULATIVE_AT need the writes
	 * above to have taken effect before switching to the guest's EL1/EL0
	 * translation regime, hence the patched-in ISB.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */