/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>
#include <asm/spectre.h>

.macro save_caller_saved_regs_vect
    /* x0 and x1 were saved in the vector entry */
    stp x2, x3,   [sp, #-16]!
    stp x4, x5,   [sp, #-16]!
    stp x6, x7,   [sp, #-16]!
    stp x8, x9,   [sp, #-16]!
    stp x10, x11, [sp, #-16]!
    stp x12, x13, [sp, #-16]!
    stp x14, x15, [sp, #-16]!
    stp x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
    ldp x16, x17, [sp], #16
    ldp x14, x15, [sp], #16
    ldp x12, x13, [sp], #16
    ldp x10, x11, [sp], #16
    ldp x8, x9,   [sp], #16
    ldp x6, x7,   [sp], #16
    ldp x4, x5,   [sp], #16
    ldp x2, x3,   [sp], #16
    ldp x0, x1,   [sp], #16
.endm
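
/*
 * Note the asymmetry with save_caller_saved_regs_vect: x0 and x1 are
 * pushed by the vector preamble (the stp in valid_vect below), so only
 * the restore side pops them.
 */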

    .text

el1_sync:               // Guest trapped into EL2

    mrs x0, esr_el2
    ubfx    x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
    cmp x0, #ESR_ELx_EC_HVC64
    ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne
    b.ne    el1_trap
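    /*
     * The cmp/ccmp pair implements "EC == HVC64 || EC == HVC32" with a
     * single branch: when the HVC64 compare already matched, ccmp sets
     * NZCV to #4 (Z set) instead of comparing, so b.ne falls through
     * for either HVC encoding.
     */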

    /*
     * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
     * The workaround has already been applied on the host,
     * so let's quickly get back to the guest. We don't bother
     * restoring x1, as it can be clobbered anyway.
     */
    ldr x1, [sp]                // Guest's x0
    eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
    cbz w1, wa_epilogue

    /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
    eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
              ARM_SMCCC_ARCH_WORKAROUND_2)
    cbz w1, wa_epilogue

    eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
              ARM_SMCCC_ARCH_WORKAROUND_3)
    cbnz    w1, el1_trap

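/*
 * Each eor after the first cancels the previous function ID and folds
 * in the next, so w1 holds (guest x0 ^ WORKAROUND_n) at every step and
 * a zero result identifies the call. wa_epilogue reports success:
 * x0 is zeroed (SMCCC_RET_SUCCESS), the x0/x1 pair stacked by the
 * vector preamble is discarded, and sb prevents straight-line
 * speculation past the eret.
 */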
wa_epilogue:
    mov x0, xzr
    add sp, sp, #16
    eret
    sb

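/*
 * All the exit stubs share one convention: get_vcpu_ptr loads the
 * running vcpu pointer into x1 (using x0 as scratch), and __guest_exit
 * takes the exception code in x0 and the vcpu pointer in x1.
 */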
el1_trap:
    get_vcpu_ptr    x1, x0
    mov x0, #ARM_EXCEPTION_TRAP
    b   __guest_exit

el1_irq:
el1_fiq:
    get_vcpu_ptr    x1, x0
    mov x0, #ARM_EXCEPTION_IRQ
    b   __guest_exit

el1_error:
    get_vcpu_ptr    x1, x0
    mov x0, #ARM_EXCEPTION_EL1_SERROR
    b   __guest_exit

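/*
 * Exceptions taken from EL2 itself. An illegal exception return (the
 * IL bit set in SPSR_EL2) is the one case we try to recover from by
 * bouncing out through the guest-exit path; anything else is
 * unexpected and is handed to kvm_unexpected_el2_exception in C.
 */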
el2_sync:
    /* Check for an illegal exception return (SPSR_EL2.IL, bit 20) */
    mrs x0, spsr_el2
    tbnz    x0, #20, 1f

    save_caller_saved_regs_vect
    stp     x29, x30, [sp, #-16]!
    bl  kvm_unexpected_el2_exception
    ldp     x29, x30, [sp], #16
    restore_caller_saved_regs_vect

    eret
    sb

1:
    /* Let's attempt a recovery from the illegal exception return */
    get_vcpu_ptr    x1, x0
    mov x0, #ARM_EXCEPTION_IL
    b   __guest_exit


el2_error:
    save_caller_saved_regs_vect
    stp     x29, x30, [sp, #-16]!

    bl  kvm_unexpected_el2_exception

    ldp     x29, x30, [sp], #16
    restore_caller_saved_regs_vect

    eret
    sb

.macro invalid_vector   label, target = __guest_exit_panic
    .align  2
SYM_CODE_START_LOCAL(\label)
    b \target
SYM_CODE_END(\label)
.endm

    /*
     * None of these should ever happen: the hyp code runs with SPSel
     * set to 1, so the EL2t (SP_EL0) vectors are unreachable, and
     * interrupts remain masked while running at EL2h.
     */
    invalid_vector  el2t_sync_invalid
    invalid_vector  el2t_irq_invalid
    invalid_vector  el2t_fiq_invalid
    invalid_vector  el2t_error_invalid
    invalid_vector  el2h_irq_invalid
    invalid_vector  el2h_fiq_invalid

    .ltorg

    .align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
    .error "KVM vector preamble length mismatch"
.endif
.endm

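/*
 * Every vector slot opens with a fixed two-instruction preamble
 * (esb + stp, or nop + stp in the invalid slots) that
 * kvm_patch_vector_branch() can branch over; check_preamble_length
 * asserts it is exactly KVM_VECTOR_PREAMBLE bytes. The esb makes any
 * SError pending from the guest synchronize right at the vector entry.
 */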
.macro valid_vect target
    .align 7
661:
    esb
    stp x0, x1, [sp, #-16]!
662:
    b   \target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
    .align 7
661:
    nop
    stp x0, x1, [sp, #-16]!
662:
    b   \target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
    invalid_vect    el2t_sync_invalid   // Synchronous EL2t
    invalid_vect    el2t_irq_invalid    // IRQ EL2t
    invalid_vect    el2t_fiq_invalid    // FIQ EL2t
    invalid_vect    el2t_error_invalid  // Error EL2t

    valid_vect  el2_sync        // Synchronous EL2h
    invalid_vect    el2h_irq_invalid    // IRQ EL2h
    invalid_vect    el2h_fiq_invalid    // FIQ EL2h
    valid_vect  el2_error       // Error EL2h

    valid_vect  el1_sync        // Synchronous 64-bit EL1
    valid_vect  el1_irq         // IRQ 64-bit EL1
    valid_vect  el1_fiq         // FIQ 64-bit EL1
    valid_vect  el1_error       // Error 64-bit EL1

    valid_vect  el1_sync        // Synchronous 32-bit EL1
    valid_vect  el1_irq         // IRQ 32-bit EL1
    valid_vect  el1_fiq         // FIQ 32-bit EL1
    valid_vect  el1_error       // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

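/*
 * The stack handling below is deliberately asymmetric: 32 bytes are
 * reserved but only 16 released, leaving x0/x1 saved at the top of the
 * stack exactly as if the usual "stp x0, x1, [sp, #-16]!" preamble had
 * run. Per the SMCCC convention the smc may clobber x0-x3, hence the
 * save/restore of x2/x3 around it.
 */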
.macro spectrev2_smccc_wa1_smc
    sub sp, sp, #(8 * 4)
    stp x2, x3, [sp, #(8 * 0)]
    stp x0, x1, [sp, #(8 * 2)]
    alternative_cb spectre_bhb_patch_wa3
    /* Patched to mov WA3 when supported */
    mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
    alternative_cb_end
    smc #0
    ldp x2, x3, [sp, #(8 * 0)]
    add sp, sp, #(8 * 2)
.endm

.macro hyp_ventry   indirect, spectrev2
    .align  7
1:  esb
    .if \spectrev2 != 0
    spectrev2_smccc_wa1_smc
    .else
    stp x0, x1, [sp, #-16]!
    mitigate_spectre_bhb_loop   x0
    mitigate_spectre_bhb_clear_insn
    .endif
    .if \indirect != 0
    alternative_cb  kvm_patch_vector_branch
    /*
     * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
     *
     * movz x0, #(addr & 0xffff)
     * movk x0, #((addr >> 16) & 0xffff), lsl #16
     * movk x0, #((addr >> 32) & 0xffff), lsl #32
     * br   x0
     *
     * Where:
     * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
     * See kvm_patch_vector_branch for details.
     */
    nop
    nop
    nop
    nop
    alternative_cb_end
    .endif
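    /*
     * (1b - 0b) is this slot's byte offset from the start of the
     * generated bank (label 0 is defined in generate_vectors below),
     * so this branch lands on the matching slot of __kvm_hyp_vector,
     * just past its preamble.
     */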
    b   __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
.endm

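/*
 * Sixteen 128-byte (.align 7) slots make one 2KB bank; the .org fails
 * the build if any ventry outgrows its slot.
 */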
.macro generate_vectors indirect, spectrev2
0:
    .rept 16
    hyp_ventry  \indirect, \spectrev2
    .endr
    .org 0b + SZ_2K     // Safety measure
.endm

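/*
 * One 2KB bank per hardened vector flavour (the plain HYP_VECTOR_DIRECT
 * case uses __kvm_hyp_vector itself); the final .org pair asserts that
 * the whole block is exactly __BP_HARDEN_HYP_VECS_SZ bytes.
 */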
    .align  11
SYM_CODE_START(__bp_harden_hyp_vecs)
    generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
    generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
    generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
1:  .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
    .org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)