// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
    /*
     * Need to grab text_mutex to prevent a race from module loading
     * and live kernel patching from changing the text permissions while
     * ftrace has it set to "read/write".
     */
    mutex_lock(&text_mutex);
    ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
    /*
     * ftrace_make_{call,nop}() may be called during
     * module load, and we need to finish the text_poke_queue()
     * that they do, here.
     */
    text_poke_finish();
    ftrace_poke_late = 0;
    mutex_unlock(&text_mutex);
}
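
/*
 * Editor's note (illustrative sketch, not part of the original file): core
 * ftrace brackets a batch of text modifications with the two hooks above,
 * roughly:
 *
 *    ftrace_arch_code_modify_prepare();      // take text_mutex, poke late
 *    arch_ftrace_update_code(command);       // text_poke_queue() each site
 *    ftrace_arch_code_modify_post_process(); // text_poke_finish(), unlock
 */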

static const char *ftrace_nop_replace(void)
{
    return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
    return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
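
/*
 * Editor's note (worked example, assumed addresses): the replacement is the
 * 5-byte near call "e8 <rel32>", with rel32 taken relative to the end of
 * the instruction:
 *
 *    rel32 = addr - (ip + MCOUNT_INSN_SIZE)
 *
 * e.g. patching ip = 0xffffffff81000010 to call addr = 0xffffffff81000100
 * gives rel32 = 0x100 - 0x15 = 0xeb, i.e. the bytes "e8 eb 00 00 00".
 */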

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
    char cur_code[MCOUNT_INSN_SIZE];

    /*
     * Note:
     * We are paranoid about modifying text, as if a bug were to happen, it
     * could cause us to read or write to someplace that could cause harm.
     * Carefully read the code with copy_from_kernel_nofault(), and make
     * sure what we read is what we expect it to be before modifying it.
     */
    /* read the text we want to modify */
    if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
        WARN_ON(1);
        return -EFAULT;
    }

    /* Make sure it is what we expect it to be */
    if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
        ftrace_expected = old_code;
        WARN_ON(1);
        return -EINVAL;
    }

    return 0;
}
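
/*
 * Editor's note (illustrative): for a currently-disabled site, old_code is
 * ftrace_nop_replace(), so the memcmp() above expects the 5-byte NOP
 * "0f 1f 44 00 00" (nopl 0x0(%rax,%rax,1)); for an enabled site it expects
 * "e8 <rel32>" as built by ftrace_call_replace().
 */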

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
              const char *new_code)
{
    int ret = ftrace_verify_code(ip, old_code);
    if (ret)
        return ret;

    /* replace the text with the new text */
    if (ftrace_poke_late)
        text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
    else
        text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
    return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
    unsigned long ip = rec->ip;
    const char *new, *old;

    old = ftrace_call_replace(ip, addr);
    new = ftrace_nop_replace();

    /*
     * On boot up, and when modules are loaded, the MCOUNT_ADDR
     * is converted to a nop, and will never become MCOUNT_ADDR
     * again. This code is either running before SMP (on boot up)
     * or before the code will ever be executed (module load).
     * We do not want to use the breakpoint version in this case,
     * just modify the code directly.
     */
    if (addr == MCOUNT_ADDR)
        return ftrace_modify_code_direct(ip, old, new);

    /*
     * x86 overrides ftrace_replace_code -- this function will never be used
     * in this case.
     */
    WARN_ONCE(1, "invalid use of ftrace_make_nop");
    return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
    unsigned long ip = rec->ip;
    const char *new, *old;

    old = ftrace_nop_replace();
    new = ftrace_call_replace(ip, addr);

    /* Should only be called when module is loaded */
    return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                 unsigned long addr)
{
    WARN_ON(1);
    return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
    unsigned long ip;
    const char *new;

    ip = (unsigned long)(&ftrace_call);
    new = ftrace_call_replace(ip, (unsigned long)func);
    text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

    ip = (unsigned long)(&ftrace_regs_call);
    new = ftrace_call_replace(ip, (unsigned long)func);
    text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

    return 0;
}
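
/*
 * Editor's note (summary of the mechanism, see arch/x86/kernel/alternative.c):
 * text_poke_bp() patches live code without stop_machine() by writing an INT3
 * over the first opcode byte, syncing all cores, writing the instruction
 * tail, then restoring the first byte; a CPU that traps on the INT3
 * mid-update is steered past the half-written instruction by the poke
 * handler.
 */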

void ftrace_replace_code(int enable)
{
    struct ftrace_rec_iter *iter;
    struct dyn_ftrace *rec;
    const char *new, *old;
    int ret;

    for_ftrace_rec_iter(iter) {
        rec = ftrace_rec_iter_record(iter);

        switch (ftrace_test_record(rec, enable)) {
        case FTRACE_UPDATE_IGNORE:
        default:
            continue;

        case FTRACE_UPDATE_MAKE_CALL:
            old = ftrace_nop_replace();
            break;

        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_NOP:
            old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
            break;
        }

        ret = ftrace_verify_code(rec->ip, old);
        if (ret) {
            ftrace_bug(ret, rec);
            return;
        }
    }

    for_ftrace_rec_iter(iter) {
        rec = ftrace_rec_iter_record(iter);

        switch (ftrace_test_record(rec, enable)) {
        case FTRACE_UPDATE_IGNORE:
        default:
            continue;

        case FTRACE_UPDATE_MAKE_CALL:
        case FTRACE_UPDATE_MODIFY_CALL:
            new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
            break;

        case FTRACE_UPDATE_MAKE_NOP:
            new = ftrace_nop_replace();
            break;
        }

        text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
        ftrace_update_record(rec, enable);
    }
    text_poke_finish();
}
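
/*
 * Editor's note: ftrace_replace_code() is deliberately two-pass. The first
 * loop only verifies that every site still holds the expected bytes, so a
 * mismatch aborts before any text is touched; the second loop queues all
 * the writes, and text_poke_finish() applies them as one batch, amortizing
 * the expensive core synchronization over every call site.
 */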

void arch_ftrace_update_code(int command)
{
    ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
    return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
    module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
    return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-function_trace_op (4 bytes)> */
#define OP_REF_SIZE 7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
    char code[OP_REF_SIZE];
    struct {
        char op[3];
        int offset;
    } __attribute__((packed));
};
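
/*
 * Editor's note (illustrative encoding, assumed displacement): an instance
 * of the instruction this union overlays:
 *
 *    48 8b 15 f2 00 00 00    movq 0xf2(%rip), %rdx
 *
 * op[] holds the three opcode/modrm bytes { 0x48, 0x8b, 0x15 } and 'offset'
 * is the little-endian 32-bit RIP-relative displacement that
 * create_trampoline() rewrites below.
 */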

#define RET_SIZE        (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
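
/*
 * Editor's note: RET_SIZE reserves room for the trampoline's return: 5
 * bytes under CONFIG_RETPOLINE (enough for "jmp __x86_return_thunk" when
 * the RETHUNK feature is active), otherwise 1 byte for "ret" (c3) plus one
 * more for the "int3" (cc) straight-line-speculation trap when CONFIG_SLS
 * is set.
 */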

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
    unsigned long start_offset;
    unsigned long end_offset;
    unsigned long op_offset;
    unsigned long call_offset;
    unsigned long jmp_offset;
    unsigned long offset;
    unsigned long npages;
    unsigned long size;
    unsigned long *ptr;
    void *trampoline;
    void *ip;
    /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
    unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
    unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
    union ftrace_op_code_union op_ptr;
    int ret;

    if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
        start_offset = (unsigned long)ftrace_regs_caller;
        end_offset = (unsigned long)ftrace_regs_caller_end;
        op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
        call_offset = (unsigned long)ftrace_regs_call;
        jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
    } else {
        start_offset = (unsigned long)ftrace_caller;
        end_offset = (unsigned long)ftrace_caller_end;
        op_offset = (unsigned long)ftrace_caller_op_ptr;
        call_offset = (unsigned long)ftrace_call;
        jmp_offset = 0;
    }

    size = end_offset - start_offset;

    /*
     * Allocate enough space to store the ftrace_caller code,
     * the return (or return-thunk jump), as well as the address of
     * the ftrace_ops this trampoline is used for.
     */
    trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
    if (!trampoline)
        return 0;

    *tramp_size = size + RET_SIZE + sizeof(void *);
    npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

    /* Copy ftrace_caller onto the trampoline memory */
    ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
    if (WARN_ON(ret < 0))
        goto fail;

    ip = trampoline + size;
    if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
        __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE);
    else
        memcpy(ip, retq, sizeof(retq));

    /* No need to test direct calls on created trampolines */
    if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
        /* NOP the jnz 1f; but make sure it's a 2 byte jnz */
        ip = trampoline + (jmp_offset - start_offset);
        if (WARN_ON(*(char *)ip != 0x75))
            goto fail;
        ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
        if (ret < 0)
            goto fail;
    }

    /*
     * The address of the ftrace_ops that is used for this trampoline
     * is stored at the end of the trampoline. This will be used to
     * load the third parameter for the callback. Basically, that
     * location at the end of the trampoline takes the place of
     * the global function_trace_op variable.
     */

    ptr = (unsigned long *)(trampoline + size + RET_SIZE);
    *ptr = (unsigned long)ops;

    op_offset -= start_offset;
    memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

    /* Are we pointing to the reference? */
    if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
        goto fail;

    /* Load the contents of ptr into the callback parameter */
    offset = (unsigned long)ptr;
    offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

    op_ptr.offset = offset;
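
    /*
     * Editor's note (worked example, assumed values): with the trampoline
     * at 0xffffffffa0000000, op_offset = 0x30 and size + RET_SIZE = 0x200,
     * the ops slot is at 0xffffffffa0000200 and
     *
     *    offset = 0x200 - (0x30 + OP_REF_SIZE) = 0x1c9
     *
     * so the patched movq now loads the trampoline's private ftrace_ops
     * pointer instead of the global function_trace_op.
     */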

    /* put in the new offset to the ftrace_ops */
    memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

    /* put in the call to the function */
    mutex_lock(&text_mutex);
    call_offset -= start_offset;
    memcpy(trampoline + call_offset,
           text_gen_insn(CALL_INSN_OPCODE,
                 trampoline + call_offset,
                 ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
    mutex_unlock(&text_mutex);

    /* The ALLOC_TRAMP flag lets us know we created it */
    ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

    set_vm_flush_reset_perms(trampoline);

    if (likely(system_state != SYSTEM_BOOTING))
        set_memory_ro((unsigned long)trampoline, npages);
    set_memory_x((unsigned long)trampoline, npages);
    return (unsigned long)trampoline;
fail:
    tramp_free(trampoline);
    return 0;
}
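
/*
 * Editor's note: the finished trampoline laid out by the code above:
 *
 *    +------------------------------+ <- trampoline
 *    | copy of ftrace_(regs_)caller |
 *    +------------------------------+ <- trampoline + size
 *    | ret / jmp __x86_return_thunk |
 *    +------------------------------+ <- trampoline + size + RET_SIZE
 *    | struct ftrace_ops *          |
 *    +------------------------------+
 */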

void set_ftrace_ops_ro(void)
{
    struct ftrace_ops *ops;
    unsigned long start_offset;
    unsigned long end_offset;
    unsigned long npages;
    unsigned long size;

    do_for_each_ftrace_op(ops, ftrace_ops_list) {
        if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
            continue;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
            start_offset = (unsigned long)ftrace_regs_caller;
            end_offset = (unsigned long)ftrace_regs_caller_end;
        } else {
            start_offset = (unsigned long)ftrace_caller;
            end_offset = (unsigned long)ftrace_caller_end;
        }
        size = end_offset - start_offset;
        size = size + RET_SIZE + sizeof(void *);
        npages = DIV_ROUND_UP(size, PAGE_SIZE);
        set_memory_ro((unsigned long)ops->trampoline, npages);
    } while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
    unsigned long start_offset;
    unsigned long call_offset;

    if (save_regs) {
        start_offset = (unsigned long)ftrace_regs_caller;
        call_offset = (unsigned long)ftrace_regs_call;
    } else {
        start_offset = (unsigned long)ftrace_caller;
        call_offset = (unsigned long)ftrace_call;
    }

    return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
    ftrace_func_t func;
    unsigned long offset;
    unsigned long ip;
    unsigned int size;
    const char *new;

    if (!ops->trampoline) {
        ops->trampoline = create_trampoline(ops, &size);
        if (!ops->trampoline)
            return;
        ops->trampoline_size = size;
        return;
    }

    /*
     * The ftrace_ops caller may set up its own trampoline.
     * In such a case, this code must not modify it.
     */
    if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
        return;

    offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
    ip = ops->trampoline + offset;
    func = ftrace_ops_get_func(ops);

    mutex_lock(&text_mutex);
    /* Do a safe modify in case the trampoline is executing */
    new = ftrace_call_replace(ip, (unsigned long)func);
    text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
    mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
    union text_poke_insn call;
    int ret;

    ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
    if (WARN_ON_ONCE(ret < 0))
        return NULL;

    /* Make sure this is a call */
    if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
        pr_warn("Expected E8, got %x\n", call.opcode);
        return NULL;
    }

    return ptr + CALL_INSN_SIZE + call.disp;
}
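
/*
 * Editor's note (worked example, assumed bytes): for a call at
 * ptr = 0xffffffff81001000 encoded as "e8 fb 00 00 00", call.disp is 0xfb
 * and the callee is ptr + CALL_INSN_SIZE + disp =
 * 0xffffffff81001000 + 5 + 0xfb = 0xffffffff81001100.
 */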

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
               unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
    unsigned long offset;
    bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
    void *ptr;

    if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
    defined(CONFIG_FUNCTION_GRAPH_TRACER)
        /*
         * We only know about function graph tracer setting as static
         * trampoline.
         */
        if (ops->trampoline == FTRACE_GRAPH_ADDR)
            return (void *)prepare_ftrace_return;
#endif
        return NULL;
    }

    offset = calc_trampoline_call_offset(save_regs);

    if (save_regs)
        ptr = (void *)FTRACE_REGS_ADDR + offset;
    else
        ptr = (void *)FTRACE_ADDR + offset;

    return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
    unsigned long offset;

    /* If we didn't allocate this trampoline, consider it static */
    if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
        return static_tramp_func(ops, rec);

    offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
    return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
    if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
        return;

    tramp_free((void *)ops->trampoline);
    ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
    return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
    const char *new;

    new = ftrace_jmp_replace(ip, (unsigned long)func);
    text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
    return 0;
}
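
/*
 * Editor's note: the 5-byte near jump "e9 <rel32>" generated here is the
 * same length as the 5-byte call and NOP used elsewhere, which is why
 * MCOUNT_INSN_SIZE is a safe poke size for all three forms.
 */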

int ftrace_enable_ftrace_graph_caller(void)
{
    unsigned long ip = (unsigned long)(&ftrace_graph_call);

    return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
    unsigned long ip = (unsigned long)(&ftrace_graph_call);

    return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
               unsigned long frame_pointer)
{
    unsigned long return_hooker = (unsigned long)&return_to_handler;
    int bit;

    /*
     * When resuming from suspend-to-ram, this function can be indirectly
     * called from early CPU startup code while the CPU is in real mode,
     * which would fail miserably.  Make sure the stack pointer is a
     * virtual address.
     *
     * This check isn't as accurate as virt_addr_valid(), but it should be
     * good enough for this purpose, and it's fast.
     */
    if (unlikely((long)__builtin_frame_address(0) >= 0))
        return;

    if (unlikely(ftrace_graph_is_dead()))
        return;

    if (unlikely(atomic_read(&current->tracing_graph_pause)))
        return;

    bit = ftrace_test_recursion_trylock(ip, *parent);
    if (bit < 0)
        return;

    if (!function_graph_enter(*parent, ip, frame_pointer, parent))
        *parent = return_hooker;

    ftrace_test_recursion_unlock(bit);
}
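
/*
 * Editor's note (illustrative): 'parent' points at the stack slot holding
 * the traced function's return address. On success that slot is redirected:
 *
 *    before:  [slot] -> real caller
 *    after:   [slot] -> return_to_handler  (real caller saved on the
 *                                           task's ret_stack)
 *
 * so when the traced function returns, return_to_handler invokes the exit
 * handler and then jumps back to the saved caller.
 */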

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
               struct ftrace_ops *op, struct ftrace_regs *fregs)
{
    struct pt_regs *regs = &fregs->regs;
    unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

    prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */