0001
0002
0003
0004
0005
0006
0007 #include <linux/linkage.h>
0008
0009 #include <asm/assembler.h>
0010 #include <asm/kvm_arm.h>
0011 #include <asm/kvm_asm.h>
0012 #include <asm/kvm_mmu.h>
0013
0014 .text
0015
/*
 * __host_exit - exception taken from the host at EL2 (nVHE).
 *
 * On entry the vector stub has pushed the host's original x0/x1 onto the
 * EL2 stack (see host_el1_sync_vect). Save the full host GPR state into
 * the per-CPU host context, call the C trap handler, then restore the
 * state and eret back to the host.
 */
0016 SYM_FUNC_START(__host_exit)
0017 get_host_ctxt x0, x1
0018
0019
	/* Store the host regs x2 and x3 (still live from the host) */
0020 stp x2, x3, [x0, #CPU_XREG_OFFSET(2)]
0021
0022
	/* Retrieve the host regs x0-x1 from the stack */
0023 ldp x2, x3, [sp], #16 // x0, x1
0024
0025
	/* Store the host regs x0-x1 and x4-x17 */
0026 stp x2, x3, [x0, #CPU_XREG_OFFSET(0)]
0027 stp x4, x5, [x0, #CPU_XREG_OFFSET(4)]
0028 stp x6, x7, [x0, #CPU_XREG_OFFSET(6)]
0029 stp x8, x9, [x0, #CPU_XREG_OFFSET(8)]
0030 stp x10, x11, [x0, #CPU_XREG_OFFSET(10)]
0031 stp x12, x13, [x0, #CPU_XREG_OFFSET(12)]
0032 stp x14, x15, [x0, #CPU_XREG_OFFSET(14)]
0033 stp x16, x17, [x0, #CPU_XREG_OFFSET(16)]
0034
0035
	/* Store the host callee-saved regs (x18+) */
0036 save_callee_saved_regs x0
0037
0038
	/* Keep the host context pointer in x29 across the call */
0039 mov x29, x0
0040 bl handle_trap
0041
0042
	/* Restore every host register, x0-x7 included (x29 = context) */
0043 __host_enter_restore_full:
0044 ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)]
0045 ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)]
0046 ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)]
0047 ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)]
0048
0049
	/*
	 * Panic path entry: x0-x7 already hold the arguments set up by
	 * __hyp_do_panic, so restore only x8 and up from the context.
	 */
0050 __host_enter_for_panic:
0051 ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)]
0052 ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)]
0053 ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)]
0054 ldp x14, x15, [x29, #CPU_XREG_OFFSET(14)]
0055 ldp x16, x17, [x29, #CPU_XREG_OFFSET(16)]
0056
0057
0058 restore_callee_saved_regs x29
0059
0060
	/* Return to the host using whatever ELR_EL2/SPSR_EL2 hold */
0061 __host_enter_without_restoring:
0062 eret
0063 sb // speculation barrier after eret
0064 SYM_FUNC_END(__host_exit)
0065
0066
0067
0068
/*
 * __host_enter(struct kvm_cpu_context *host_ctxt) - enter the host,
 * restoring the full register state from the context in x0.
 * Does not return to the caller (exits via eret in __host_exit's tail).
 */
0069 SYM_FUNC_START(__host_enter)
0070 mov x29, x0 // x29 = host context, as the restore path expects
0071 b __host_enter_restore_full
0072 SYM_FUNC_END(__host_enter)
0073
0074
0075
0076
0077
/*
 * __hyp_do_panic - hand a hypervisor failure over to the host's panic code.
 *
 * Called with (see invalid_host_el1_vect and the hyp panic paths):
 *   x0 = host context pointer, or 0 if no host context should be restored
 *   x1 = SPSR_EL2 at the point of failure
 *   x2 = ELR_EL2 at the point of failure
 *   x3 = PAR_EL1
 * Exits to nvhe_hyp_panic_handler at EL1 via the __host_enter_* tails.
 */
0078 SYM_FUNC_START(__hyp_do_panic)
0079
	/* Prepare the return state: EL1h, all DAIF exceptions masked */
0080 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
0081 PSR_MODE_EL1h)
0082 msr spsr_el2, lr
0083 adr_l lr, nvhe_hyp_panic_handler
0084 hyp_kimg_va lr, x6 // convert hyp VA to kernel image VA
0085 msr elr_el2, lr
0086
0087 mov x29, x0 // stash context ptr; doubles as "restore?" flag
0088
0089 #ifdef CONFIG_NVHE_EL2_DEBUG
0090
	/* Disable stage-2 (clear HCR_EL2.VM) so the host can inspect hyp state */
0091 mrs x0, hcr_el2
0092 bic x0, x0, #HCR_VM
0093 msr hcr_el2, x0
0094 isb
0095 tlbi vmalls12e1 // invalidate stale stage-1/stage-2 TLB entries
0096 dsb nsh
0097 #endif
0098
0099
	/* Load the panic handler arguments into x0-x7 */
0100 mrs x0, esr_el2
0101 mov x4, x3 // x4 = PAR_EL1
0102 mov x3, x2 // x2 keeps the virtual ELR ...
0103 hyp_pa x3, x6 // ... while x3 gets its physical address
0104 get_vcpu_ptr x5, x6
0105 mrs x6, far_el2
0106 mrs x7, hpfar_el2
0107
0108
	/* Enter the host, restoring the context only if one was supplied */
0109 cbz x29, __host_enter_without_restoring
0110 b __host_enter_for_panic
0111 SYM_FUNC_END(__hyp_do_panic)
0112
/*
 * __host_hvc - dispatch an HVC issued by the host: either a KVM hypercall
 * (handled as a normal host exit) or, outside protected mode, one of the
 * early stub hypercalls handled by __kvm_handle_stub_hvc.
 */
0113 SYM_FUNC_START(__host_hvc)
0114 ldp x0, x1, [sp] // Don't fixup the stack yet
0115
0116
	/* Stub hypercalls are disabled in protected mode */
0117 alternative_if ARM64_KVM_PROTECTED_MODE
0118 b __host_exit
0119 alternative_else_nop_endif
0120
0121
	/* Check for a stub HVC call; anything >= the limit is a real exit */
0122 cmp x0, #HVC_STUB_HCALL_NR
0123 b.hs __host_exit
0124
	/* Stub call: drop the saved x0/x1, we won't return this way */
0125 add sp, sp, #16
0126
0127
0128
0129
0130
0131
	/*
	 * Branch to the stub handler at its physical address — it may
	 * not return via the normal exception-return path.
	 */
0132 adr_l x5, __kvm_handle_stub_hvc
0133 hyp_pa x5, x6
0134 br x5
0135 SYM_FUNC_END(__host_hvc)
0136
/*
 * host_el1_sync_vect - vector entry for a synchronous exception from the
 * host: free up x0/x1, then route HVCs to __host_hvc and everything else
 * to __host_exit. Must fit in one 0x80-byte vector slot (checked below).
 */
0137 .macro host_el1_sync_vect
0138 .align 7
0139 .L__vect_start\@:
0140 stp x0, x1, [sp, #-16]! // save host x0/x1 to use them as scratch
0141 mrs x0, esr_el2
0142 ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH // x0 = exception class
0143 cmp x0, #ESR_ELx_EC_HVC64
0144 b.eq __host_hvc
0145 b __host_exit
0146 .L__vect_end\@:
0147 .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
0148 .error "host_el1_sync_vect larger than vector entry"
0149 .endif
0150 .endm
0151
/*
 * invalid_host_el2_vect - vector entry for an unexpected exception taken
 * from EL2 itself. Detects hyp stack overflow first, then panics.
 */
0152 .macro invalid_host_el2_vect
0153 .align 7
0154
0155
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * The sequence below swaps sp/x0 arithmetically so the PAGE_SHIFT
	 * bit of the original sp can be tested, then swaps them back.
	 */
0156
0157
0158
0159
0160 add sp, sp, x0 // sp' = sp + x0
0161 sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
0162 tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
0163 sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
0164 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
0165
0166
	/* If a guest is loaded, panic out of it */
0167 stp x0, x1, [sp, #-16]!
0168 get_loaded_vcpu x0, x1
0169 cbnz x0, __guest_exit_panic
0170 add sp, sp, #16
0171
0172
	/*
	 * No guest loaded: panic directly. NOTE(review): the panic may not
	 * be clean if the exception was taken before __host_exit saved the
	 * host context or after __host_enter partially clobbered it.
	 */
0173
0174
0175
0176
0177 b hyp_panic
0178
0179 .L__hyp_sp_overflow\@:
	/* Switch to the per-CPU overflow stack before panicking */
0180
0181 adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
0182
0183 b hyp_panic_bad_stack
0184 ASM_BUG()
0185 .endm
0186
/*
 * invalid_host_el1_vect - vector entry for an unexpected exception from
 * the host (IRQ/FIQ/SError): capture SPSR/ELR/PAR and panic without
 * restoring a host context (x0 = 0, see __hyp_do_panic).
 */
0187 .macro invalid_host_el1_vect
0188 .align 7
0189 mov x0, xzr // no host context to restore
0190 mrs x1, spsr_el2
0191 mrs x2, elr_el2
0192 mrs x3, par_el1
0193 b __hyp_do_panic
0194 .endm
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
/*
 * __kvm_hyp_host_vector - the EL2 vector table installed while the host
 * runs. 2KB-aligned (.align 11) as the architecture requires for VBAR;
 * each macro expands to one 0x80-byte vector slot.
 */
0206 .align 11
0207 SYM_CODE_START(__kvm_hyp_host_vector)
0208 invalid_host_el2_vect // Synchronous EL2t
0209 invalid_host_el2_vect // IRQ EL2t
0210 invalid_host_el2_vect // FIQ EL2t
0211 invalid_host_el2_vect // Error EL2t
0212
0213 invalid_host_el2_vect // Synchronous EL2h
0214 invalid_host_el2_vect // IRQ EL2h
0215 invalid_host_el2_vect // FIQ EL2h
0216 invalid_host_el2_vect // Error EL2h
0217
0218 host_el1_sync_vect // Synchronous 64-bit EL1/EL0
0219 invalid_host_el1_vect // IRQ 64-bit EL1/EL0
0220 invalid_host_el1_vect // FIQ 64-bit EL1/EL0
0221 invalid_host_el1_vect // Error 64-bit EL1/EL0
0222
0223 host_el1_sync_vect // Synchronous 32-bit EL1/EL0
0224 invalid_host_el1_vect // IRQ 32-bit EL1/EL0
0225 invalid_host_el1_vect // FIQ 32-bit EL1/EL0
0226 invalid_host_el1_vect // Error 32-bit EL1/EL0
0227 SYM_CODE_END(__kvm_hyp_host_vector)
0228
0229
0230
0231
0232
0233
0234
/*
 * __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt)
 *
 * Forward the host's SMC (arguments in x0-x17 of host_ctxt) to EL3,
 * then store the results back into the context.
 */
0235 SYM_CODE_START(__kvm_hyp_host_forward_smc)
0236
	/*
	 * Keep the context pointer in x18: the SMCCC requires the callee
	 * to preserve x18+, so it survives the smc while x0-x17 carry the
	 * call's arguments and results.
	 */
0237
0238
0239
0240 mov x18, x0
0241
0242 ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
0243 ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
0244 ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
0245 ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
0246 ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
0247 ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
0248 ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
0249 ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
0250 ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
0251
0252 smc #0
0253
	/* Write the EL3 results (x0-x17) back into the host context */
0254 stp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
0255 stp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
0256 stp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
0257 stp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
0258 stp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
0259 stp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
0260 stp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
0261 stp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
0262 stp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
0263
0264 ret
0265 SYM_CODE_END(__kvm_hyp_host_forward_smc)