/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
0007 #ifndef __ARM64_KVM_HYP_H__
0008 #define __ARM64_KVM_HYP_H__
0009
0010 #include <linux/compiler.h>
0011 #include <linux/kvm_host.h>
0012 #include <asm/alternative.h>
0013 #include <asm/sysreg.h>
0014
/*
 * Per-CPU state shared between the host kernel and the EL2 hypervisor:
 * the host's register context, the hyp exception vector address, and
 * (nVHE) the parameters used to initialise EL2 on this CPU.
 */
DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
0018
/*
 * Read a system register whose encoding differs between nVHE and VHE:
 * the ALTERNATIVE is runtime-patched on ARM64_HAS_VIRT_HOST_EXTN, so a
 * VHE host uses the r##vh encoding and an nVHE host uses r##nvh.
 * __mrs_s() emits the MRS by raw encoding, so this also works for
 * registers the assembler may not know by name.
 */
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),	\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})
0028
/*
 * Write a system register whose encoding differs between nVHE and VHE;
 * counterpart of read_sysreg_elx() (same ALTERNATIVE patching).  The
 * "rZ" constraint lets the compiler use the zero register for a
 * constant-zero value instead of materialising it in a GPR.
 */
#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : : "rZ" (__val));			\
	} while (0)
0037
/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 *
 * Note the VHE aliasing: with E2H set, "EL1" accessors redirect to the
 * _EL12 encodings (the guest's view) and "EL2" accessors use the plain
 * _EL1 names.
 */
#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
0050
/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as its always inlined.
 */
#define __kvm_swab32(x) ___constant_swab32(x)
0057
/* Emulate a trapped GICV (vgic-v2) MMIO access from the guest at EL2. */
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

/* vgic-v3 CPU interface management, run at EL2 around guest entry/exit. */
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
/* Emulate a trapped GICv3 sysreg access from the guest at EL2. */
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
0067
#ifdef __KVM_NVHE_HYPERVISOR__
/* nVHE only: toggle timer traps around guest execution. */
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
/* nVHE: save/restore the EL1 sysreg context across world switch. */
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
/*
 * VHE: host and guest EL1 state are handled separately since the host
 * runs with its own EL1 state live.
 */
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif
0082
/* Switch debug register state between host and guest at EL2. */
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
/* nVHE: save/restore host trace/debug buffer state around guest runs. */
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

/* FP/SIMD and SVE register-file save/restore (asm-implemented). */
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);

#ifndef __KVM_NVHE_HYPERVISOR__
/* VHE: configure/undo guest traps at vcpu load/put time. */
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif
0099
/* Enter the guest; returns the exit code for the world-switch handler. */
u64 __guest_enter(struct kvm_vcpu *vcpu);

/* Handle a host-originated PSCI call; true if it was consumed. */
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);

#ifdef __KVM_NVHE_HYPERVISOR__
/* nVHE: hand a hyp panic back to the host with the faulting EL2 state. */
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
			       u64 elr, u64 par);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
/*
 * Protected KVM (pKVM) bring-up: set up the hypervisor's own stage-1
 * page tables and take ownership of the hyp memory pool, then switch
 * onto the new pgd/stack and continue at cont_fn.
 */
void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
			    phys_addr_t pgd, void *sp, void *cont_fn);
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
/* Return to the host context; does not come back. */
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif
0116
/*
 * Host-sampled CPU ID register values exported to the nVHE hypervisor
 * (kvm_nvhe_sym() maps to the hyp copy of each symbol), so EL2 code can
 * consult feature registers without trapping back to the host.
 */
extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
0125
0126 #endif