Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * arch/arm64/kernel/entry-ftrace.S
0004  *
0005  * Copyright (C) 2013 Linaro Limited
0006  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
0007  */
0008 
0009 #include <linux/linkage.h>
0010 #include <asm/asm-offsets.h>
0011 #include <asm/assembler.h>
0012 #include <asm/ftrace.h>
0013 #include <asm/insn.h>
0014 
0015 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
0016 /*
0017  * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
0018  * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
0019  * ftrace_make_call() have patched those NOPs to:
0020  *
0021  *  MOV X9, LR
0022  *  BL  <entry>
0023  *
0024  * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
0025  *
0026  * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
0027  * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
0028  * clobber.
0029  *
0030  * We save the callsite's context into a pt_regs before invoking any ftrace
0031  * callbacks. So that we can get a sensible backtrace, we create a stack record
0032  * for the callsite and the ftrace entry assembly. This is not sufficient for
0033  * reliable stacktrace: until we create the callsite stack record, its caller
0034  * is missing from the LR and existing chain of frame records.
0035  */
/*
 * ftrace_regs_entry - build a struct pt_regs record on the stack.
 *
 * \allregs: 1 => also save x10-x28 (ftrace_regs_caller variant),
 *           0 => save only the argument registers and the FP.
 *
 * Entered with x9 holding the callsite's original LR (placed there by the
 * patched "MOV X9, LR") and x30 holding the address just after the patched
 * BL. Uses x10 as scratch, which is safe to clobber (see comment above:
 * x9-x17 are free at this point).
 */
0036     .macro  ftrace_regs_entry, allregs=0
0037     /* Make room for pt_regs, plus a callee frame */
0038     sub sp, sp, #(PT_REGS_SIZE + 16)
0039 
0040     /* Save function arguments (and x9 for simplicity) */
0041     stp x0, x1, [sp, #S_X0]
0042     stp x2, x3, [sp, #S_X2]
0043     stp x4, x5, [sp, #S_X4]
0044     stp x6, x7, [sp, #S_X6]
0045     stp x8, x9, [sp, #S_X8]
0046 
0047     /* Optionally save the callee-saved registers, always save the FP */
0048     .if \allregs == 1
0049     stp x10, x11, [sp, #S_X10]
0050     stp x12, x13, [sp, #S_X12]
0051     stp x14, x15, [sp, #S_X14]
0052     stp x16, x17, [sp, #S_X16]
0053     stp x18, x19, [sp, #S_X18]
0054     stp x20, x21, [sp, #S_X20]
0055     stp x22, x23, [sp, #S_X22]
0056     stp x24, x25, [sp, #S_X24]
0057     stp x26, x27, [sp, #S_X26]
0058     stp x28, x29, [sp, #S_X28]
0059     .else
0060     str x29, [sp, #S_FP]
0061     .endif
0062 
0063     /* Save the callsite's SP and LR */
     /* x10 = the SP value before this macro's sub, i.e. the callsite's SP */
0064     add x10, sp, #(PT_REGS_SIZE + 16)
     /* Single stp: x9 -> regs->regs[30], x10 -> regs->sp; this relies on
      * the sp field immediately following regs[30] (S_SP == S_LR + 8). */
0065     stp x9, x10, [sp, #S_LR]
0066 
0067     /* Save the PC after the ftrace callsite */
0068     str x30, [sp, #S_PC]
0069 
0070     /* Create a frame record for the callsite above pt_regs */
     /* (old FP, callsite LR) so a backtrace steps from us to the caller */
0071     stp x29, x9, [sp, #PT_REGS_SIZE]
0072     add x29, sp, #PT_REGS_SIZE
0073 
0074     /* Create our frame record within pt_regs. */
     /* Second record makes the ftrace trampoline itself visible in traces */
0075     stp x29, x30, [sp, #S_STACKFRAME]
0076     add x29, sp, #S_STACKFRAME
0077     .endm
0078 
/*
 * ftrace_regs_caller - trampoline entry for FTRACE_OPS_FL_SAVE_REGS callsites.
 * Saves the full pt_regs (allregs=1) and falls into the shared tail.
 */
SYM_CODE_START(ftrace_regs_caller)
0080     bti c               // BTI landing pad for the patched callsite branch
0081     ftrace_regs_entry   1   // save all registers
0082     b   ftrace_common
0083 SYM_CODE_END(ftrace_regs_caller)
0084 
/*
 * ftrace_caller - trampoline entry for ordinary (no-regs) callsites.
 * Saves only argument registers + FP/LR/SP/PC (allregs=0), then the
 * shared tail.
 */
SYM_CODE_START(ftrace_caller)
0086     bti c               // BTI landing pad for the patched callsite branch
0087     ftrace_regs_entry   0   // save argument registers only
0088     b   ftrace_common
0089 SYM_CODE_END(ftrace_caller)
0090 
/*
 * ftrace_common - shared tail of ftrace_caller/ftrace_regs_caller.
 *
 * Marshals the ftrace_func_t arguments (ip, parent_ip, op, regs), invokes
 * the tracer via the runtime-patched ftrace_call site, then unwinds the
 * pt_regs frame and returns into the instrumented function's body.
 */
SYM_CODE_START(ftrace_common)
0092     sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
0093     mov x1, x9              // parent_ip (callsite's LR)
0094     ldr_l   x2, function_trace_op       // op
0095     mov x3, sp              // regs
0096 
/* Patched at runtime to "bl <tracer>"; bl ftrace_stub is the disabled state */
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
0098     bl  ftrace_stub
0099 
0100 /*
0101  * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
0102  * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
0103  * to restore x0-x8, x29, and x30.
0104  */
0105     /* Restore function arguments */
0106     ldp x0, x1, [sp]        // offset 0: S_X0 is the first pt_regs field
0107     ldp x2, x3, [sp, #S_X2]
0108     ldp x4, x5, [sp, #S_X4]
0109     ldp x6, x7, [sp, #S_X6]
0110     ldr x8, [sp, #S_X8]
0111 
0112     /* Restore the callsite's FP, LR, PC */
0113     ldr x29, [sp, #S_FP]
0114     ldr x30, [sp, #S_LR]
0115     ldr x9, [sp, #S_PC]
0116 
0117     /* Restore the callsite's SP */
0118     add sp, sp, #PT_REGS_SIZE + 16
0119 
     /* x9 = saved S_PC = address after the patched BL: resume the function */
0120     ret x9
0121 SYM_CODE_END(ftrace_common)
0122 
0123 #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
0124 
0125 /*
0126  * Gcc with -pg will put the following code in the beginning of each function:
0127  *      mov x0, x30
0128  *      bl _mcount
0129  *  [function's body ...]
0130  * "bl _mcount" may be replaced with "bl ftrace_caller" or NOP if dynamic
0131  * ftrace is enabled.
0132  *
0133  * Please note that x0 as an argument will not be used here because we can
0134  * get lr(x30) of instrumented function at any time by winding up call stack
0135  * as long as the kernel is compiled without -fomit-frame-pointer.
0136  * (or CONFIG_FRAME_POINTER, this is forced on arm64)
0137  *
0138  * stack layout after mcount_enter in _mcount():
0139  *
0140  * current sp/fp =>  0:+-----+
0141  * in _mcount()        | x29 | -> instrumented function's fp
0142  *                     +-----+
0143  *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
0144  * old sp       => +16:+-----+
0145  * when instrumented   |     |
0146  * function calls      | ... |
0147  * _mcount()           |     |
0148  *                     |     |
0149  * instrumented => +xx:+-----+
0150  * function's fp       | x29 | -> parent's fp
0151  *                     +-----+
0152  *                     | x30 | -> instrumented function's lr (= parent's pc)
0153  *                     +-----+
0154  *                     | ... |
0155  */
0156 
     /* Push a (x29, x30) frame record and point x29 at it, so _mcount's
      * frame is walkable; this creates the layout pictured above. */
0157     .macro mcount_enter
0158     stp x29, x30, [sp, #-16]!
0159     mov x29, sp
0160     .endm
0161 
     /* Pop the frame pushed by mcount_enter and return to the
      * instrumented function. */
0162     .macro mcount_exit
0163     ldp x29, x30, [sp], #16
0164     ret
0165     .endm
0166 
     /* \rd = \rn - 4: convert a return address into the address of the
      * "bl _mcount" instruction itself. */
0167     .macro mcount_adjust_addr rd, rn
0168     sub \rd, \rn, #AARCH64_INSN_SIZE
0169     .endm
0170 
0171     /* for instrumented function's parent */
0172     .macro mcount_get_parent_fp reg
0173     ldr \reg, [x29]
0174     ldr \reg, [\reg]
0175     .endm
0176 
0177     /* for instrumented function */
0178     .macro mcount_get_pc0 reg
0179     mcount_adjust_addr  \reg, x30
0180     .endm
0181 
     /* [x29, #8] = x30 saved by mcount_enter = return address into the
      * instrumented function; adjust back to the "bl" to get its pc. */
0182     .macro mcount_get_pc reg
0183     ldr \reg, [x29, #8]
0184     mcount_adjust_addr  \reg, \reg
0185     .endm
0186 
     /* [x29] = instrumented function's fp; [fp, #8] = its saved lr
      * (= parent's pc) — see the stack layout above. */
0187     .macro mcount_get_lr reg
0188     ldr \reg, [x29]
0189     ldr \reg, [\reg, #8]
0190     .endm
0191 
     /* Address of the slot holding the instrumented function's saved lr,
      * so the graph tracer can overwrite the return path in place. */
0192     .macro mcount_get_lr_addr reg
0193     ldr \reg, [x29]
0194     add \reg, \reg, #8
0195     .endm
0196 
0197 #ifndef CONFIG_DYNAMIC_FTRACE
0198 /*
0199  * void _mcount(unsigned long return_address)
0200  * @return_address: return address to instrumented function
0201  *
0202  * This function makes calls, if enabled, to:
0203  *     - tracer function to probe instrumented function's entry,
0204  *     - ftrace_graph_caller to set up an exit hook
0205  */
SYM_FUNC_START(_mcount)
0207     mcount_enter
0208 
0209     ldr_l   x2, ftrace_trace_function
0210     adr x0, ftrace_stub
0211     cmp x0, x2          // if (ftrace_trace_function
0212     b.eq    skip_ftrace_call    //     != ftrace_stub) {
0213 
0214     mcount_get_pc   x0      //       function's pc
0215     mcount_get_lr   x1      //       function's lr (= parent's pc)
0216     blr x2          //   (*ftrace_trace_function)(pc, lr);
0217 
0218 skip_ftrace_call:           // }
0219 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0220     ldr_l   x2, ftrace_graph_return
     // NOTE(review): x0 still holds &ftrace_stub only on the skip path; if
     // the tracer above ran, x0 was overwritten (mcount_get_pc / the call),
     // so this comparison may spuriously branch to ftrace_graph_caller —
     // verify against current mainline before relying on it.
0221     cmp x0, x2          //   if ((ftrace_graph_return
0222     b.ne    ftrace_graph_caller //        != ftrace_stub)
0223 
0224     ldr_l   x2, ftrace_graph_entry  //     || (ftrace_graph_entry
0225     adr_l   x0, ftrace_graph_entry_stub //     != ftrace_graph_entry_stub))
0226     cmp x0, x2
0227     b.ne    ftrace_graph_caller //     ftrace_graph_caller();
0228 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
0229     mcount_exit         // return to the instrumented function
0230 SYM_FUNC_END(_mcount)
0231 EXPORT_SYMBOL(_mcount)
0232 NOKPROBE(_mcount)
0233 
0234 #else /* CONFIG_DYNAMIC_FTRACE */
0235 /*
0236  * _mcount() is used when building the kernel with the -pg option, but all
0237  * branch instructions to _mcount() are initially replaced with NOPs at
0238  * kernel start-up; later, on a per-function basis, each NOP is patched to
0239  * branch to ftrace_caller() when enabled, or back to a NOP when disabled.
0240  */
/* Dynamic ftrace: _mcount is only a link-time target; every callsite is
 * NOPed at boot, so if this is ever reached it just returns. */
SYM_FUNC_START(_mcount)
0242     ret
0243 SYM_FUNC_END(_mcount)
0244 EXPORT_SYMBOL(_mcount)
0245 NOKPROBE(_mcount)
0246 
0247 /*
0248  * void ftrace_caller(unsigned long return_address)
0249  * @return_address: return address to instrumented function
0250  *
0251  * This function is a counterpart of _mcount() in 'static' ftrace, and
0252  * makes calls to:
0253  *     - tracer function to probe instrumented function's entry,
0254  *     - ftrace_graph_caller to set up an exit hook
0255  */
/*
 * Dynamic-ftrace trampoline: both call sites below start life as NOPs and
 * are patched in place at runtime; their labels are global so the patching
 * code can find them.
 */
SYM_FUNC_START(ftrace_caller)
0257     mcount_enter
0258 
0259     mcount_get_pc0  x0      //     function's pc
0260     mcount_get_lr   x1      //     function's lr
0261 
0262 SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)  // tracer(pc, lr);
0263     nop             // This will be replaced with "bl xxx"
0264                     // where xxx can be any kind of tracer.
0265 
0266 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0267 SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
0268     nop             // If enabled, this will be replaced
0269                     // with "b ftrace_graph_caller"
0270 #endif
0271 
0272     mcount_exit         // return to the instrumented function
0273 SYM_FUNC_END(ftrace_caller)
0274 #endif /* CONFIG_DYNAMIC_FTRACE */
0275 
0276 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0277 /*
0278  * void ftrace_graph_caller(void)
0279  *
0280  * Called from _mcount() or ftrace_caller() when function_graph tracer is
0281  * selected.
0282  * This function w/ prepare_ftrace_return() fakes link register's value on
0283  * the call stack in order to intercept instrumented function's return path
0284  * and run return_to_handler() later on its exit.
0285  */
SYM_FUNC_START(ftrace_graph_caller)
0287     mcount_get_pc         x0    //     function's pc
0288     mcount_get_lr_addr    x1    //     pointer to function's saved lr
0289     mcount_get_parent_fp      x2    //     parent's fp
     // prepare_ftrace_return() may rewrite *x1 so the instrumented function
     // returns into return_to_handler() instead of its real caller.
0290     bl  prepare_ftrace_return   // prepare_ftrace_return(pc, &lr, fp)
0291 
0292     mcount_exit
0293 SYM_FUNC_END(ftrace_graph_caller)
0294 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
0295 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
0296 
/* Empty tracer callback; also used as the "tracing disabled" sentinel that
 * ftrace_trace_function / ftrace_graph_return are compared against above. */
SYM_FUNC_START(ftrace_stub)
0298     ret
0299 SYM_FUNC_END(ftrace_stub)
0300 
0301 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0302 /*
0303  * void return_to_handler(void)
0304  *
0305  * Run ftrace_return_to_handler() before going back to parent.
0306  * @fp is checked against the value passed by ftrace_graph_caller().
0307  */
SYM_CODE_START(return_to_handler)
0309     /* save return value regs */
     /* x0-x7 may all carry return data; 64-byte frame keeps sp 16-aligned */
0310     sub sp, sp, #64
0311     stp x0, x1, [sp]
0312     stp x2, x3, [sp, #16]
0313     stp x4, x5, [sp, #32]
0314     stp x6, x7, [sp, #48]
0315 
0316     mov x0, x29         //     parent's fp
0317     bl  ftrace_return_to_handler// addr = ftrace_return_to_handler(fp);
0318     mov x30, x0         // restore the original return address
0319 
0320     /* restore return value regs */
0321     ldp x0, x1, [sp]
0322     ldp x2, x3, [sp, #16]
0323     ldp x4, x5, [sp, #32]
0324     ldp x6, x7, [sp, #48]
0325     add sp, sp, #64
0326 
0327     ret             // return to the real caller via the recovered x30
0328 SYM_CODE_END(return_to_handler)
0329 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */