/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text

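/*
 * Entered from the host's exception vectors with the host's x0 and x1
 * stashed on the stack. Saves the host context, hands the exception to
 * the C trap handler and restores the context on return.
 */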
SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap

	/* Restore host regs x0-x17 */
__host_enter_restore_full:
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb				// prevent straight-line speculation past eret
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 *				  u64 elr, u64 par);
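 *
 * A NULL host_ctxt skips the host register restore on the return path.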
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	adr_l	lr, nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr	hcr_el2, x0
	isb
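	/* Invalidate stage-1 and stage-2 TLB entries for the current VMID */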
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-7 */
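	/*
	 * x0 = ESR_EL2, x1 = SPSR (caller), x2 = ELR (caller),
	 * x3 = hyp PA of ELR, x4 = PAR (caller), x5 = loaded vCPU,
	 * x6 = FAR_EL2, x7 = HPFAR_EL2
	 */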
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

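/*
 * Host HVCs land here. Stub hcalls (function ID in x0 below
 * HVC_STUB_HCALL_NR) are forwarded to the idmap'd stub handler;
 * everything else is treated as a regular host exit.
 */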
SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there.
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	adr_l	x5, __kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

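/*
 * Synchronous exceptions taken from the host. HVC64 is routed to
 * __host_hvc, anything else to __host_exit. x0/x1 are stashed on the
 * stack first to free up scratch registers.
 */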
.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

.macro invalid_host_el2_vect
	.align 7

	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
	 * of SP should always be 1.
	 */
	add	sp, sp, x0		// sp' = sp + x0
	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp

	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic

.L__hyp_sp_overflow\@:
	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

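	/* hyp_panic_bad_stack() does not return; trap if it somehow does */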
	b	hyp_panic_bad_stack
	ASM_BUG()
.endm

.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
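/* VBAR_EL2 requires 2KB (2^11) alignment: 16 vectors of 0x80 bytes each */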
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
	invalid_host_el1_vect			// Error 64-bit EL1/EL0

	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
	invalid_host_el1_vect			// Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
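 * SMCCC v1.2 allows parameters and results in x0-x17, so the full range
 * is copied to and from the context around the SMC.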
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)