/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

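/*
 * Clear the general purpose registers x0-x29 so that no stale user or
 * kernel values survive across the exception boundary.
 */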
	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

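/*
 * Vector entry stub: carve out a pt_regs frame on the stack (checking for
 * stack overflow when CONFIG_VMAP_STACK is enabled) and branch to the
 * matching el<el><ht>_<regsize>_<label> entry handler.
 */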
	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\el\ht\()_\regsize\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * if the overflow was detected while already running on it; in that
	 * case the delta between the stashed SP and the top of the overflow
	 * stack lies within the overflow stack itself.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm

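/*
 * Compute the trampoline alias of \sym: its offset from the start of
 * .entry.tramp.text, rebased onto TRAMP_VALIAS. Clobbers \tmp.
 */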
	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, \sym
	add	\dst, \dst, \tmp
	adr_l	\tmp, .entry.tramp.text
	sub	\dst, \dst, \tmp
	.endm

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm

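/*
 * Program GCR_EL1 from a task's mte_ctrl value: install the user's tag
 * exclusion mask and enable random tag generation (RRND).
 */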
	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm

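/* Switch GCR_EL1 to the kernel's tag generation settings for KASAN_HW_TAGS. */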
	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

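/* Restore the user's GCR_EL1 from thread.mte_ctrl on return to userspace. */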
	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr	\tmp, \tmp2
1:
#endif
	.endm

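/*
 * Save the interrupted context into the pt_regs frame that kernel_ventry
 * reserved on the stack, and set up the kernel environment (task pointer,
 * PAC keys, GCR_EL1, SSBD state) for the C handlers.
 */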
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would
	 * avoid a load, this was measured to be slower on Cortex-A75 and
	 * Cortex-A76.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load tsk
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * For exceptions from EL0, create a final frame record.
	 * For exceptions from EL1, create a synthetic frame record so the
	 * interrupted code shows up in the backtrace.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

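/*
 * Mirror image of kernel_entry: restore the saved pt_regs frame and, for
 * returns to EL0, also the user stack pointer, pointer authentication keys,
 * GCR_EL1 and the SSBD mitigation state, then eret (possibly via the KPTI
 * exit trampoline).
 */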
	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif
#endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x29
	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif
	sb
	.endm

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

tsk	.req	x28		// current thread_info

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
	kernel_ventry	0, t, 64, error		// Error 64-bit EL0

	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
SYM_CODE_END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
SYM_CODE_START_LOCAL(__bad_stack)
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */
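/*
 * Generate an exception entry stub: save the register state, call the
 * matching C handler with a pointer to the pt_regs frame, then return to
 * user or kernel context as appropriate.
 */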
	.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
	kernel_entry \el, \regsize
	mov	x0, sp
	bl	el\el\ht\()_\regsize\()_\label\()_handler
	.if \el == 0
	b	ret_to_user
	.else
	b	ret_to_kernel
	.endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
	.endm

/*
 * Early exception handlers
 */
	entry_handler	1, t, 64, sync
	entry_handler	1, t, 64, irq
	entry_handler	1, t, 64, fiq
	entry_handler	1, t, 64, error

	entry_handler	1, h, 64, sync
	entry_handler	1, h, 64, irq
	entry_handler	1, h, 64, fiq
	entry_handler	1, h, 64, error

	entry_handler	0, t, 64, sync
	entry_handler	0, t, 64, irq
	entry_handler	0, t, 64, fiq
	entry_handler	0, t, 64, error

	entry_handler	0, t, 32, sync
	entry_handler	0, t, 32, irq
	entry_handler	0, t, 32, fiq
	entry_handler	0, t, 32, error

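/* Return to a kernel context after handling an EL1 exception. */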
SYM_CODE_START_LOCAL(ret_to_kernel)
	kernel_exit 1
SYM_CODE_END(ret_to_kernel)

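/* Return to userspace, re-enabling single-step if the task requested it. */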
SYM_CODE_START_LOCAL(ret_to_user)
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase_on_task_stack
#endif
	kernel_exit 0
SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

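/*
 * Read the kernel virtual address of \var into \dst: from a literal in
 * .entry.tramp.rodata when the kernel is relocatable, otherwise by building
 * the link-time constant with movz/movk.
 */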
	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RELOCATABLE
	ldr	\dst, .L__tramp_data_\var
	.ifndef	.L__tramp_data_\var
	.pushsection ".entry.tramp.rodata", "a", %progbits
	.align	3
.L__tramp_data_\var:
	.quad	\var
	.popsection
	.endif
#else
	/*
	 * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
	 * compile time constant (and hence not secret and not worth hiding).
	 *
	 * As statically allocated kernel code and data always live in the top
	 * 47 bits of the address space we can sign-extend bit 47 and avoid an
	 * instruction to load the upper 16 bits (which must be 0xFFFF).
	 */
	movz	\dst, :abs_g2_s:\var
	movk	\dst, :abs_g1_nc:\var
	movk	\dst, :abs_g0_nc:\var
#endif
	.endm

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3

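/*
 * Generate one trampoline vector entry: stash x30, apply the requested
 * Spectre-BHB mitigation, map the kernel if KPTI is in use, then branch to
 * the equivalent slot in the full kernel vectors.
 */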
	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch. i.e. the
	 * ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	adr_l	x30, vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch.
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm

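/*
 * Return to userspace via the trampoline: switch VBAR_EL1 to this CPU's
 * vector base, unmap the kernel, restore lr/x29 and eret.
 */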
	.macro tramp_exit, regsize = 64
	tramp_data_read_var	x30, this_cpu_vector
	get_this_cpu_offset x29
	ldr	x30, [x30, x29]

	msr	vbar_el1, x30
	ldr	lr, [sp, #S_LR]
	tramp_unmap_kernel	x29
	.if	\regsize == 64
	mrs	x29, far_el1
	.endif
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	eret
	sb
	.endm

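/*
 * Emit one trampoline vector table: the eight EL1 slots are left empty
 * (0x400 bytes of padding) since the trampoline is only used for EL0
 * exceptions, followed by four 64-bit and four 32-bit EL0 entries.
 */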
	.macro generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
SYM_CODE_START_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
	tramp_exit
SYM_CODE_END(tramp_exit_native)

SYM_CODE_START(tramp_exit_compat)
	tramp_exit	32
SYM_CODE_END(tramp_exit_compat)
	.popsection				// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0
	scs_load x1
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	mov	x0, sp
	bl	asm_exit_to_user_mode
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)

/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 */
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
	stp	scs_sp, xzr, [sp, #-16]!
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif

	/* Create a frame record to save our LR and SP (implicit in FP) */
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	ldr_this_cpu x16, irq_stack_ptr, x17
	mov	x15, #IRQ_STACK_SIZE
	add	x16, x16, x15

	/* Move to the new stack and call the function there */
	mov	sp, x16
	blr	x1

	/*
	 * Restore the SP from the FP, and restore the FP and LR from the frame
	 * record.
	 */
	mov	sp, x29
	ldp	x29, x30, [sp], #16
#ifdef CONFIG_SHADOW_CALL_STACK
	ldp	scs_sp, xzr, [sp], #16
#endif
	ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1
 * memory argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Remember whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
	.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: argument from SDEI registration
 * x1: struct sdei_registered_event argument from registration
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
#endif

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/*
	 * x0 = (x0 <= SDEI_EV_FAILED) ?
	 * EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME
	 */
	cmp	x0, #SDEI_EV_FAILED
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */