/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors: Catalin Marinas <catalin.marinas@arm.com>
 *      Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
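/*
 * context_tracking_user_exit/enter are C functions, so under the AAPCS64
 * calling convention the caller-saved registers (x0-x18) are not preserved
 * across the call. That is why ct_user_exit reloads the syscall arguments
 * x0-x7 from the pt_regs frame when used on the syscall path.
 */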
    .macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
    bl  context_tracking_user_exit
    .if \syscall == 1
    /*
     * Save/restore needed during syscalls.  Restore syscall arguments from
     * the values already saved on stack during kernel_entry.
     */
    ldp x0, x1, [sp]
    ldp x2, x3, [sp, #S_X2]
    ldp x4, x5, [sp, #S_X4]
    ldp x6, x7, [sp, #S_X6]
    .endif
#endif
    .endm

    .macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
    bl  context_tracking_user_enter
#endif
    .endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC    0
#define BAD_IRQ     1
#define BAD_FIQ     2
#define BAD_ERROR   3

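/*
 * kernel_entry builds a struct pt_regs frame on the kernel stack: it
 * allocates S_FRAME_SIZE bytes, saves x0-x29 as register pairs, stores lr
 * and the aborted SP at S_LR, and ELR_EL1/SPSR_EL1 (the aborted PC and
 * PSTATE) at S_PC. 'el' selects whether the exception was taken from EL0
 * or EL1, and 'regsize' is 32 for exceptions taken from AArch32 EL0.
 */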
    .macro  kernel_entry, el, regsize = 64
    sub sp, sp, #S_FRAME_SIZE
    .if \regsize == 32
    mov w0, w0              // zero upper 32 bits of x0
    .endif
    stp x0, x1, [sp, #16 * 0]
    stp x2, x3, [sp, #16 * 1]
    stp x4, x5, [sp, #16 * 2]
    stp x6, x7, [sp, #16 * 3]
    stp x8, x9, [sp, #16 * 4]
    stp x10, x11, [sp, #16 * 5]
    stp x12, x13, [sp, #16 * 6]
    stp x14, x15, [sp, #16 * 7]
    stp x16, x17, [sp, #16 * 8]
    stp x18, x19, [sp, #16 * 9]
    stp x20, x21, [sp, #16 * 10]
    stp x22, x23, [sp, #16 * 11]
    stp x24, x25, [sp, #16 * 12]
    stp x26, x27, [sp, #16 * 13]
    stp x28, x29, [sp, #16 * 14]

    .if \el == 0
    mrs x21, sp_el0
    ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
    ldr x19, [tsk, #TSK_TI_FLAGS]   // since we can unmask debug
    disable_step_tsk x19, x20       // exceptions when scheduling.

    mov x29, xzr            // fp pointed to user-space
    .else
    add x21, sp, #S_FRAME_SIZE
    get_thread_info tsk
    /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
    ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
    str x20, [sp, #S_ORIG_ADDR_LIMIT]
    mov x20, #TASK_SIZE_64
    str x20, [tsk, #TSK_TI_ADDR_LIMIT]
    /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
    .endif /* \el == 0 */
    mrs x22, elr_el1
    mrs x23, spsr_el1
    stp lr, x21, [sp, #S_LR]

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
    /*
     * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
     * EL0, there is no need to check the state of TTBR0_EL1 since
     * accesses are always enabled.
     * Note that the meaning of this bit differs from the ARMv8.1 PAN
     * feature as all TTBR0_EL1 accesses are disabled, not just those to
     * user mappings.
     */
alternative_if ARM64_HAS_PAN
    b   1f              // skip TTBR0 PAN
alternative_else_nop_endif

    .if \el != 0
    mrs x21, ttbr0_el1
    tst x21, #0xffff << 48      // Check for the reserved ASID
    orr x23, x23, #PSR_PAN_BIT      // Set the emulated PAN in the saved SPSR
    b.eq    1f              // TTBR0 access already disabled
    and x23, x23, #~PSR_PAN_BIT     // Clear the emulated PAN in the saved SPSR
    .endif

    __uaccess_ttbr0_disable x21
1:
#endif

    stp x22, x23, [sp, #S_PC]

    /*
     * Set syscallno to -1 by default (overridden later if real syscall).
     */
    .if \el == 0
    mvn x21, xzr
    str x21, [sp, #S_SYSCALLNO]
    .endif

    /*
     * Set sp_el0 to current thread_info.
     */
    .if \el == 0
    msr sp_el0, tsk
    .endif

    /*
     * Registers that may be useful after this macro is invoked:
     *
     * x21 - aborted SP
     * x22 - aborted PC
     * x23 - aborted PSTATE
     */
    .endm

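/*
 * kernel_exit undoes kernel_entry: it restores the saved addr_limit when
 * returning to EL1, re-enables TTBR0_EL1 user access if the SW PAN
 * emulation is in use, reloads the general purpose registers from the
 * pt_regs frame, and finally writes ELR_EL1/SPSR_EL1 so that eret resumes
 * the interrupted context.
 */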
    .macro  kernel_exit, el
    .if \el != 0
    /* Restore the task's original addr_limit. */
    ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
    str x20, [tsk, #TSK_TI_ADDR_LIMIT]

    /* No need to restore UAO, it will be restored from SPSR_EL1 */
    .endif

    ldp x21, x22, [sp, #S_PC]       // load ELR, SPSR
    .if \el == 0
    ct_user_enter
    .endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
    /*
     * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
     * PAN bit checking.
     */
alternative_if ARM64_HAS_PAN
    b   2f              // skip TTBR0 PAN
alternative_else_nop_endif

    .if \el != 0
    tbnz    x22, #22, 1f            // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
    .endif

    __uaccess_ttbr0_enable x0

    .if \el == 0
    /*
     * Enable errata workarounds only if returning to user. The only
     * workaround currently required for TTBR0_EL1 changes is for the
     * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
     * corruption).
     */
    post_ttbr0_update_workaround
    .endif
1:
    .if \el != 0
    and x22, x22, #~PSR_PAN_BIT     // ARMv8.0 CPUs do not understand this bit
    .endif
2:
#endif

    .if \el == 0
    ldr x23, [sp, #S_SP]        // load return stack pointer
    msr sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
    tbz x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
    mrs x29, contextidr_el1
    msr contextidr_el1, x29
#else
    msr contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
    .endif

    msr elr_el1, x21            // set up the return data
    msr spsr_el1, x22
    ldp x0, x1, [sp, #16 * 0]
    ldp x2, x3, [sp, #16 * 1]
    ldp x4, x5, [sp, #16 * 2]
    ldp x6, x7, [sp, #16 * 3]
    ldp x8, x9, [sp, #16 * 4]
    ldp x10, x11, [sp, #16 * 5]
    ldp x12, x13, [sp, #16 * 6]
    ldp x14, x15, [sp, #16 * 7]
    ldp x16, x17, [sp, #16 * 8]
    ldp x18, x19, [sp, #16 * 9]
    ldp x20, x21, [sp, #16 * 10]
    ldp x22, x23, [sp, #16 * 11]
    ldp x24, x25, [sp, #16 * 12]
    ldp x26, x27, [sp, #16 * 13]
    ldp x28, x29, [sp, #16 * 14]
    ldr lr, [sp, #S_LR]
    add sp, sp, #S_FRAME_SIZE       // restore sp
    eret                    // return to kernel
    .endm

    .macro  irq_stack_entry
    mov x19, sp         // preserve the original sp

    /*
     * Compare sp with the base of the task stack.
     * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
     * and should switch to the irq stack.
     */
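    /*
     * Worked example (assuming, say, a 16 KiB THREAD_SIZE): with a task
     * stack base of 0xffff000012344000 and sp = 0xffff000012345e70 the two
     * values differ only in the low 14 bits, so (base ^ sp) masked with
     * ~(THREAD_SIZE - 1) is zero and we fall through to the stack switch.
     * A non-zero result means sp is not on the task stack (e.g. we are
     * already on the irq stack) and the switch is skipped.
     */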
    ldr x25, [tsk, TSK_STACK]
    eor x25, x25, x19
    and x25, x25, #~(THREAD_SIZE - 1)
    cbnz    x25, 9998f

    adr_this_cpu x25, irq_stack, x26
    mov x26, #IRQ_STACK_START_SP
    add x26, x25, x26

    /* switch to the irq stack */
    mov sp, x26

    /*
     * Add a dummy stack frame; this non-standard format is fixed up
     * by unwind_frame().
     */
    stp     x29, x19, [sp, #-16]!
    mov x29, sp

9998:
    .endm

    /*
     * x19 should be preserved between irq_stack_entry and
     * irq_stack_exit.
     */
    .macro  irq_stack_exit
    mov sp, x19
    .endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr   .req    x25     // number of system calls
scno    .req    x26     // syscall number
stbl    .req    x27     // syscall table pointer
tsk .req    x28     // current thread_info

/*
 * Interrupt handling.
 */
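/*
 * irq_handler loads the generic handler from handle_arch_irq and passes it
 * the pt_regs pointer. Note that x0 is set from sp *before* irq_stack_entry
 * runs, so the handler sees the pt_regs frame on the task stack even after
 * we have switched to the per-cpu irq stack.
 */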
    .macro  irq_handler
    ldr_l   x1, handle_arch_irq
    mov x0, sp
    irq_stack_entry
    blr x1
    irq_stack_exit
    .endm

    .text

/*
 * Exception vectors.
 */
    .pushsection ".entry.text", "ax"

    .align  11
ENTRY(vectors)
    ventry  el1_sync_invalid        // Synchronous EL1t
    ventry  el1_irq_invalid         // IRQ EL1t
    ventry  el1_fiq_invalid         // FIQ EL1t
    ventry  el1_error_invalid       // Error EL1t

    ventry  el1_sync            // Synchronous EL1h
    ventry  el1_irq             // IRQ EL1h
    ventry  el1_fiq_invalid         // FIQ EL1h
    ventry  el1_error_invalid       // Error EL1h

    ventry  el0_sync            // Synchronous 64-bit EL0
    ventry  el0_irq             // IRQ 64-bit EL0
    ventry  el0_fiq_invalid         // FIQ 64-bit EL0
    ventry  el0_error_invalid       // Error 64-bit EL0

#ifdef CONFIG_COMPAT
    ventry  el0_sync_compat         // Synchronous 32-bit EL0
    ventry  el0_irq_compat          // IRQ 32-bit EL0
    ventry  el0_fiq_invalid_compat      // FIQ 32-bit EL0
    ventry  el0_error_invalid_compat    // Error 32-bit EL0
#else
    ventry  el0_sync_invalid        // Synchronous 32-bit EL0
    ventry  el0_irq_invalid         // IRQ 32-bit EL0
    ventry  el0_fiq_invalid         // FIQ 32-bit EL0
    ventry  el0_error_invalid       // Error 32-bit EL0
#endif
END(vectors)
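/*
 * The table above follows the architectural VBAR_EL1 layout: 16 vector
 * slots of 0x80 bytes each (hence the 2 KiB alignment from ".align 11"),
 * grouped as current EL with SP_EL0, current EL with SP_ELx, lower EL
 * using AArch64, and lower EL using AArch32. Within each group the order
 * is Synchronous, IRQ, FIQ, SError.
 */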

/*
 * Invalid mode handlers
 */
    .macro  inv_entry, el, reason, regsize = 64
    kernel_entry \el, \regsize
    mov x0, sp
    mov x1, #\reason
    mrs x2, esr_el1
    b   bad_mode
    .endm

el0_sync_invalid:
    inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
    inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
    inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
    inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
    inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
    inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
    inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
    inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
    inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
    inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
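/*
 * el1_sync dispatches on the exception class (EC) field read from ESR_EL1.
 * The debug-related exception classes have the highest EC encodings, so a
 * single "b.ge el1_dbg" after comparing against ESR_ELx_EC_BREAKPT_CUR
 * covers breakpoints, software step, watchpoints and BRK in one test.
 */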
    .align  6
el1_sync:
    kernel_entry 1
    mrs x1, esr_el1         // read the syndrome register
    lsr x24, x1, #ESR_ELx_EC_SHIFT  // exception class
    cmp x24, #ESR_ELx_EC_DABT_CUR   // data abort in EL1
    b.eq    el1_da
    cmp x24, #ESR_ELx_EC_IABT_CUR   // instruction abort in EL1
    b.eq    el1_ia
    cmp x24, #ESR_ELx_EC_SYS64      // configurable trap
    b.eq    el1_undef
    cmp x24, #ESR_ELx_EC_SP_ALIGN   // stack alignment exception
    b.eq    el1_sp_pc
    cmp x24, #ESR_ELx_EC_PC_ALIGN   // pc alignment exception
    b.eq    el1_sp_pc
    cmp x24, #ESR_ELx_EC_UNKNOWN    // unknown exception in EL1
    b.eq    el1_undef
    cmp x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
    b.ge    el1_dbg
    b   el1_inv

el1_ia:
    /*
     * Fall through to the Data abort case
     */
el1_da:
    /*
     * Data abort handling
     */
    mrs x0, far_el1
    enable_dbg
    // re-enable interrupts if they were enabled in the aborted context
    tbnz    x23, #7, 1f         // PSR_I_BIT
    enable_irq
1:
    mov x2, sp              // struct pt_regs
    bl  do_mem_abort

    // disable interrupts before pulling preserved data off the stack
    disable_irq
    kernel_exit 1
el1_sp_pc:
    /*
     * Stack or PC alignment exception handling
     */
    mrs x0, far_el1
    enable_dbg
    mov x2, sp
    b   do_sp_pc_abort
el1_undef:
    /*
     * Undefined instruction
     */
    enable_dbg
    mov x0, sp
    b   do_undefinstr
el1_dbg:
    /*
     * Debug exception handling
     */
    cmp x24, #ESR_ELx_EC_BRK64      // if BRK64
    cinc    x24, x24, eq            // set bit '0'
    tbz x24, #0, el1_inv        // EL1 only
    mrs x0, far_el1
    mov x2, sp              // struct pt_regs
    bl  do_debug_exception
    kernel_exit 1
el1_inv:
    // TODO: add support for undefined instructions in kernel mode
    enable_dbg
    mov x0, sp
    mov x2, x1
    mov x1, #BAD_SYNC
    b   bad_mode
ENDPROC(el1_sync)

    .align  6
el1_irq:
    kernel_entry 1
    enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
    bl  trace_hardirqs_off
#endif

    irq_handler

#ifdef CONFIG_PREEMPT
    ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
    cbnz    w24, 1f             // preempt count != 0
    ldr x0, [tsk, #TSK_TI_FLAGS]    // get flags
    tbz x0, #TIF_NEED_RESCHED, 1f   // needs rescheduling?
    bl  el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
    bl  trace_hardirqs_on
#endif
    kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
    mov x24, lr
1:  bl  preempt_schedule_irq        // irq en/disable is done inside
    ldr x0, [tsk, #TSK_TI_FLAGS]    // get the new task's TI_FLAGS
    tbnz    x0, #TIF_NEED_RESCHED, 1b   // needs rescheduling?
    ret x24
#endif

/*
 * EL0 mode handlers.
 */
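/*
 * In the EL0 handlers below the syndrome is kept in x25 and, for faults,
 * the fault address in x26. Both are callee-saved registers, so they
 * survive the ct_user_exit call made before the C handler is finally
 * invoked with the values moved into x0-x2.
 */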
    .align  6
el0_sync:
    kernel_entry 0
    mrs x25, esr_el1            // read the syndrome register
    lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
    cmp x24, #ESR_ELx_EC_SVC64      // SVC in 64-bit state
    b.eq    el0_svc
    cmp x24, #ESR_ELx_EC_DABT_LOW   // data abort in EL0
    b.eq    el0_da
    cmp x24, #ESR_ELx_EC_IABT_LOW   // instruction abort in EL0
    b.eq    el0_ia
    cmp x24, #ESR_ELx_EC_FP_ASIMD   // FP/ASIMD access
    b.eq    el0_fpsimd_acc
    cmp x24, #ESR_ELx_EC_FP_EXC64   // FP/ASIMD exception
    b.eq    el0_fpsimd_exc
    cmp x24, #ESR_ELx_EC_SYS64      // configurable trap
    b.eq    el0_sys
    cmp x24, #ESR_ELx_EC_SP_ALIGN   // stack alignment exception
    b.eq    el0_sp_pc
    cmp x24, #ESR_ELx_EC_PC_ALIGN   // pc alignment exception
    b.eq    el0_sp_pc
    cmp x24, #ESR_ELx_EC_UNKNOWN    // unknown exception in EL0
    b.eq    el0_undef
    cmp x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
    b.ge    el0_dbg
    b   el0_inv

#ifdef CONFIG_COMPAT
    .align  6
el0_sync_compat:
    kernel_entry 0, 32
    mrs x25, esr_el1            // read the syndrome register
    lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
    cmp x24, #ESR_ELx_EC_SVC32      // SVC in 32-bit state
    b.eq    el0_svc_compat
    cmp x24, #ESR_ELx_EC_DABT_LOW   // data abort in EL0
    b.eq    el0_da
    cmp x24, #ESR_ELx_EC_IABT_LOW   // instruction abort in EL0
    b.eq    el0_ia
    cmp x24, #ESR_ELx_EC_FP_ASIMD   // FP/ASIMD access
    b.eq    el0_fpsimd_acc
    cmp x24, #ESR_ELx_EC_FP_EXC32   // FP/ASIMD exception
    b.eq    el0_fpsimd_exc
    cmp x24, #ESR_ELx_EC_PC_ALIGN   // pc alignment exception
    b.eq    el0_sp_pc
    cmp x24, #ESR_ELx_EC_UNKNOWN    // unknown exception in EL0
    b.eq    el0_undef
    cmp x24, #ESR_ELx_EC_CP15_32    // CP15 MRC/MCR trap
    b.eq    el0_undef
    cmp x24, #ESR_ELx_EC_CP15_64    // CP15 MRRC/MCRR trap
    b.eq    el0_undef
    cmp x24, #ESR_ELx_EC_CP14_MR    // CP14 MRC/MCR trap
    b.eq    el0_undef
    cmp x24, #ESR_ELx_EC_CP14_LS    // CP14 LDC/STC trap
    b.eq    el0_undef
    cmp x24, #ESR_ELx_EC_CP14_64    // CP14 MRRC/MCRR trap
    b.eq    el0_undef
    cmp x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
    b.ge    el0_dbg
    b   el0_inv
el0_svc_compat:
    /*
     * AArch32 syscall handling
     */
    adrp    stbl, compat_sys_call_table // load compat syscall table pointer
    uxtw    scno, w7            // syscall number in w7 (r7)
    mov     sc_nr, #__NR_compat_syscalls
    b   el0_svc_naked

    .align  6
el0_irq_compat:
    kernel_entry 0, 32
    b   el0_irq_naked
#endif

el0_da:
    /*
     * Data abort handling
     */
    mrs x26, far_el1
    // enable interrupts before calling the main handler
    enable_dbg_and_irq
    ct_user_exit
    bic x0, x26, #(0xff << 56)
    mov x1, x25
    mov x2, sp
    bl  do_mem_abort
    b   ret_to_user
el0_ia:
    /*
     * Instruction abort handling
     */
    mrs x26, far_el1
    // enable interrupts before calling the main handler
    enable_dbg_and_irq
    ct_user_exit
    mov x0, x26
    mov x1, x25
    mov x2, sp
    bl  do_mem_abort
    b   ret_to_user
el0_fpsimd_acc:
    /*
     * Floating Point or Advanced SIMD access
     */
    enable_dbg
    ct_user_exit
    mov x0, x25
    mov x1, sp
    bl  do_fpsimd_acc
    b   ret_to_user
el0_fpsimd_exc:
    /*
     * Floating Point or Advanced SIMD exception
     */
    enable_dbg
    ct_user_exit
    mov x0, x25
    mov x1, sp
    bl  do_fpsimd_exc
    b   ret_to_user
el0_sp_pc:
    /*
     * Stack or PC alignment exception handling
     */
    mrs x26, far_el1
    // enable interrupts before calling the main handler
    enable_dbg_and_irq
    ct_user_exit
    mov x0, x26
    mov x1, x25
    mov x2, sp
    bl  do_sp_pc_abort
    b   ret_to_user
el0_undef:
    /*
     * Undefined instruction
     */
    // enable interrupts before calling the main handler
    enable_dbg_and_irq
    ct_user_exit
    mov x0, sp
    bl  do_undefinstr
    b   ret_to_user
el0_sys:
    /*
     * System instructions, for trapped cache maintenance instructions
     */
    enable_dbg_and_irq
    ct_user_exit
    mov x0, x25
    mov x1, sp
    bl  do_sysinstr
    b   ret_to_user
el0_dbg:
    /*
     * Debug exception handling
     */
    tbnz    x24, #0, el0_inv        // EL0 only
    mrs x0, far_el1
    mov x1, x25
    mov x2, sp
    bl  do_debug_exception
    enable_dbg
    ct_user_exit
    b   ret_to_user
el0_inv:
    enable_dbg
    ct_user_exit
    mov x0, sp
    mov x1, #BAD_SYNC
    mov x2, x25
    bl  bad_el0_sync
    b   ret_to_user
ENDPROC(el0_sync)

    .align  6
el0_irq:
    kernel_entry 0
el0_irq_naked:
    enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
    bl  trace_hardirqs_off
#endif

    ct_user_exit
    irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
    bl  trace_hardirqs_on
#endif
    b   ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
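/*
 * The cpu_context area inside thread_struct (at THREAD_CPU_CONTEXT) only
 * needs the callee-saved registers x19-x28, fp, sp and lr; everything else
 * is dead across a call to this function. sp_el0 is also updated so that
 * it tracks the incoming task (it is what get_thread_info reads).
 */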
ENTRY(cpu_switch_to)
    mov x10, #THREAD_CPU_CONTEXT
    add x8, x0, x10
    mov x9, sp
    stp x19, x20, [x8], #16     // store callee-saved registers
    stp x21, x22, [x8], #16
    stp x23, x24, [x8], #16
    stp x25, x26, [x8], #16
    stp x27, x28, [x8], #16
    stp x29, x9, [x8], #16
    str lr, [x8]
    add x8, x1, x10
    ldp x19, x20, [x8], #16     // restore callee-saved registers
    ldp x21, x22, [x8], #16
    ldp x23, x24, [x8], #16
    ldp x25, x26, [x8], #16
    ldp x27, x28, [x8], #16
    ldp x29, x9, [x8], #16
    ldr lr, [x8]
    mov sp, x9
    msr sp_el0, x1
    ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
    disable_irq             // disable interrupts
    str x0, [sp, #S_X0]         // returned x0
    ldr x1, [tsk, #TSK_TI_FLAGS]    // re-check for syscall tracing
    and x2, x1, #_TIF_SYSCALL_WORK
    cbnz    x2, ret_fast_syscall_trace
    and x2, x1, #_TIF_WORK_MASK
    cbnz    x2, work_pending
    enable_step_tsk x1, x2
    kernel_exit 0
ret_fast_syscall_trace:
    enable_irq              // enable interrupts
    b   __sys_trace_return_skipped  // we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
    mov x0, sp              // 'regs'
    bl  do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
    bl  trace_hardirqs_on       // enabled while in userspace
#endif
    ldr x1, [tsk, #TSK_TI_FLAGS]    // re-check for single-step
    b   finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
    disable_irq             // disable interrupts
    ldr x1, [tsk, #TSK_TI_FLAGS]
    and x2, x1, #_TIF_WORK_MASK
    cbnz    x2, work_pending
finish_ret_to_user:
    enable_step_tsk x1, x2
    kernel_exit 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
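/*
 * x19 and x20 come out of the new task's cpu_context, set up at task
 * creation (see copy_thread()): for a kernel thread x19 holds the thread
 * function and x20 its argument, while for a user task x19 is zero and we
 * drop straight through to ret_to_user.
 */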
ENTRY(ret_from_fork)
    bl  schedule_tail
    cbz x19, 1f             // not a kernel thread
    mov x0, x20
    blr x19
1:  get_thread_info tsk
    b   ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
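/*
 * el0_svc still has the user's x0-x7 intact: kernel_entry does not clobber
 * them and ct_user_exit 1 restores them if context tracking was invoked,
 * so the sys_* routine can be called with the arguments already in place.
 * The table is indexed with "lsl #3" because each sys_call_table entry is
 * an 8-byte function pointer.
 */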
    .align  6
el0_svc:
    adrp    stbl, sys_call_table        // load syscall table pointer
    uxtw    scno, w8            // syscall number in w8
    mov sc_nr, #__NR_syscalls
el0_svc_naked:                  // compat entry point
    stp x0, scno, [sp, #S_ORIG_X0]  // save the original x0 and syscall number
    enable_dbg_and_irq
    ct_user_exit 1

    ldr x16, [tsk, #TSK_TI_FLAGS]   // check for syscall hooks
    tst x16, #_TIF_SYSCALL_WORK
    b.ne    __sys_trace
    cmp     scno, sc_nr                     // check upper syscall limit
    b.hs    ni_sys
    ldr x16, [stbl, scno, lsl #3]   // address in the syscall table
    blr x16             // call sys_* routine
    b   ret_fast_syscall
ni_sys:
    mov x0, sp
    bl  do_ni_syscall
    b   ret_fast_syscall
ENDPROC(el0_svc)

    /*
     * This is the really slow path.  We're going to be doing context
     * switches, and waiting for our parent to respond.
     */
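    /*
     * __sys_trace is entered when any _TIF_SYSCALL_WORK flag is set. A
     * user-issued syscall(-1) is given a default return value of -ENOSYS
     * up front, and syscall_trace_enter() may rewrite the syscall number
     * or return -1 to skip the call entirely; the (possibly modified)
     * arguments are reloaded from pt_regs before the syscall is dispatched.
     */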
__sys_trace:
    mov w0, #-1             // set default errno for
    cmp     scno, x0            // user-issued syscall(-1)
    b.ne    1f
    mov x0, #-ENOSYS
    str x0, [sp, #S_X0]
1:  mov x0, sp
    bl  syscall_trace_enter
    cmp w0, #-1             // skip the syscall?
    b.eq    __sys_trace_return_skipped
    uxtw    scno, w0            // syscall number (possibly new)
    mov x1, sp              // pointer to regs
    cmp scno, sc_nr         // check upper syscall limit
    b.hs    __ni_sys_trace
    ldp x0, x1, [sp]            // restore the syscall args
    ldp x2, x3, [sp, #S_X2]
    ldp x4, x5, [sp, #S_X4]
    ldp x6, x7, [sp, #S_X6]
    ldr x16, [stbl, scno, lsl #3]   // address in the syscall table
    blr x16             // call sys_* routine

__sys_trace_return:
    str x0, [sp, #S_X0]         // save returned x0
__sys_trace_return_skipped:
    mov x0, sp
    bl  syscall_trace_exit
    b   ret_to_user

__ni_sys_trace:
    mov x0, sp
    bl  do_ni_syscall
    b   __sys_trace_return

    .popsection             // .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
    mov x0, sp
    b   sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)