Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Code for replacing ftrace calls with jumps.
0004  *
0005  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
0006  *
0007  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
0008  *
0009  * Added function graph tracer code, taken from x86 that was written
0010  * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
0011  *
0012  */
0013 
0014 #define pr_fmt(fmt) "ftrace-powerpc: " fmt
0015 
0016 #include <linux/spinlock.h>
0017 #include <linux/hardirq.h>
0018 #include <linux/uaccess.h>
0019 #include <linux/module.h>
0020 #include <linux/ftrace.h>
0021 #include <linux/percpu.h>
0022 #include <linux/init.h>
0023 #include <linux/list.h>
0024 
0025 #include <asm/cacheflush.h>
0026 #include <asm/code-patching.h>
0027 #include <asm/ftrace.h>
0028 #include <asm/syscall.h>
0029 #include <asm/inst.h>
0030 
0031 /*
0032  * We generally only have a single long_branch tramp and at most 2 or 3 plt
0033  * tramps generated. But, we don't use the plt tramps currently. We also allot
0034  * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
0035  * tramps in total. Set aside 8 just to be sure.
0036  */
0037 #define NUM_FTRACE_TRAMPS   8
0038 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
0039 
0040 static ppc_inst_t
0041 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
0042 {
0043     ppc_inst_t op;
0044 
0045     addr = ppc_function_entry((void *)addr);
0046 
0047     /* if (link) set op to 'bl' else 'b' */
0048     create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
0049 
0050     return op;
0051 }
0052 
0053 static inline int
0054 ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
0055 {
0056     ppc_inst_t replaced;
0057 
0058     /*
0059      * Note:
0060      * We are paranoid about modifying text, as if a bug was to happen, it
0061      * could cause us to read or write to someplace that could cause harm.
0062      * Carefully read and modify the code with probe_kernel_*(), and make
0063      * sure what we read is what we expected it to be before modifying it.
0064      */
0065 
0066     /* read the text we want to modify */
0067     if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
0068         return -EFAULT;
0069 
0070     /* Make sure it is what we expect it to be */
0071     if (!ppc_inst_equal(replaced, old)) {
0072         pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
0073                ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
0074         return -EINVAL;
0075     }
0076 
0077     /* replace the text with the new text */
0078     return patch_instruction((u32 *)ip, new);
0079 }
0080 
/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
    unsigned long entry = ppc_function_entry((void *)addr);

    /* Can a 24-bit relative branch at 'ip' reach the function entry? */
    return is_offset_in_branch_range(entry - ip);
}
0090 
0091 static int is_bl_op(ppc_inst_t op)
0092 {
0093     return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
0094 }
0095 
0096 static int is_b_op(ppc_inst_t op)
0097 {
0098     return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
0099 }
0100 
0101 static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
0102 {
0103     int offset;
0104 
0105     offset = PPC_LI(ppc_inst_val(op));
0106     /* make it signed */
0107     if (offset & 0x02000000)
0108         offset |= 0xfe000000;
0109 
0110     return ip + (long)offset;
0111 }
0112 
0113 #ifdef CONFIG_MODULES
/*
 * Turn the patched "bl <tramp>" at a module call site (rec->ip) back
 * into a nop.
 *
 * The branch must currently go through a module trampoline that
 * resolves to 'addr'; that is verified before anything is patched.
 * When neither -mprofile-kernel nor PPC32 applies, the "nop" is in
 * fact a "b +8" so the TOC-restoring load after the call site is
 * jumped over -- see the comment ahead of the patch below.
 *
 * Returns 0 on success, -EFAULT if text reads or the trampoline
 * lookup fail, -EINVAL on unexpected code, -EPERM if patching fails.
 */
static int
__ftrace_make_nop(struct module *mod,
          struct dyn_ftrace *rec, unsigned long addr)
{
    unsigned long entry, ptr, tramp;
    unsigned long ip = rec->ip;
    ppc_inst_t op, pop;

    /* read where this goes */
    if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
        pr_err("Fetching opcode failed.\n");
        return -EFAULT;
    }

    /* Make sure that this is still a 24bit jump */
    if (!is_bl_op(op)) {
        pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
        return -EINVAL;
    }

    /* lets find where the pointer goes */
    tramp = find_bl_target(ip, op);

    pr_devel("ip:%lx jumps to %lx", ip, tramp);

    if (module_trampoline_target(mod, tramp, &ptr)) {
        pr_err("Failed to get trampoline target\n");
        return -EFAULT;
    }

    pr_devel("trampoline target %lx", ptr);

    entry = ppc_global_function_entry((void *)addr);
    /* This should match what was called */
    if (ptr != entry) {
        pr_err("addr %lx does not match expected %lx\n", ptr, entry);
        return -EINVAL;
    }

    if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
        /* Sanity-check the instruction preceding the "bl _mcount" */
        if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
            pr_err("Fetching instruction at %lx failed.\n", ip - 4);
            return -EFAULT;
        }

        /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
        if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
            !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
            pr_err("Unexpected instruction %08lx around bl _mcount\n",
                   ppc_inst_as_ulong(op));
            return -EINVAL;
        }
    } else if (IS_ENABLED(CONFIG_PPC64)) {
        /*
         * Check what is in the next instruction. We can see ld r2,40(r1), but
         * on first pass after boot we will see mflr r0.
         */
        if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
            pr_err("Fetching op failed.\n");
            return -EFAULT;
        }

        if (!ppc_inst_equal(op,  ppc_inst(PPC_INST_LD_TOC))) {
            pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
                   ppc_inst_as_ulong(op));
            return -EINVAL;
        }
    }

    /*
     * When using -mprofile-kernel or PPC32 there is no load to jump over.
     *
     * Otherwise our original call site looks like:
     *
     * bl <tramp>
     * ld r2,XX(r1)
     *
     * Milton Miller pointed out that we can not simply nop the branch.
     * If a task was preempted when calling a trace function, the nops
     * will remove the way to restore the TOC in r2 and the r2 TOC will
     * get corrupted.
     *
     * Use a b +8 to jump over the load.
     */
    if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
        pop = ppc_inst(PPC_RAW_NOP());
    else
        pop = ppc_inst(PPC_RAW_BRANCH(8));  /* b +8 */

    if (patch_instruction((u32 *)ip, pop)) {
        pr_err("Patching NOP failed.\n");
        return -EPERM;
    }

    return 0;
}
0210 #else
/* Stub: without CONFIG_MODULES no module call sites exist to nop out. */
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
    return 0;
}
0215 #endif /* CONFIG_MODULES */
0216 
0217 static unsigned long find_ftrace_tramp(unsigned long ip)
0218 {
0219     int i;
0220 
0221     /*
0222      * We have the compiler generated long_branch tramps at the end
0223      * and we prefer those
0224      */
0225     for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
0226         if (!ftrace_tramps[i])
0227             continue;
0228         else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
0229             return ftrace_tramps[i];
0230 
0231     return 0;
0232 }
0233 
0234 static int add_ftrace_tramp(unsigned long tramp)
0235 {
0236     int i;
0237 
0238     for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
0239         if (!ftrace_tramps[i]) {
0240             ftrace_tramps[i] = tramp;
0241             return 0;
0242         }
0243 
0244     return -1;
0245 }
0246 
/*
 * If this is a compiler generated long_branch trampoline (essentially, a
 * trampoline that has a branch to _mcount()), we re-write the branch to
 * instead go to ftrace_[regs_]caller() and note down the location of this
 * trampoline.
 *
 * Returns 0 when the trampoline was already known, or was successfully
 * converted and recorded; -1 on any failure (unreadable text, not a
 * long-branch tramp, target not _mcount, branch out of range, or no
 * free slot left in ftrace_tramps[]).
 */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
    int i;
    ppc_inst_t op;
    unsigned long ptr;

    /* Is this a known long jump tramp? */
    for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
        if (ftrace_tramps[i] == tramp)
            return 0;

    /* New trampoline -- read where this goes */
    if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
        pr_debug("Fetching opcode failed.\n");
        return -1;
    }

    /* Is this a 24 bit branch? */
    if (!is_b_op(op)) {
        pr_debug("Trampoline is not a long branch tramp.\n");
        return -1;
    }

    /* lets find where the pointer goes */
    ptr = find_bl_target(tramp, op);

    /* Only trampolines that currently branch to _mcount are converted */
    if (ptr != ppc_global_function_entry((void *)_mcount)) {
        pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
        return -1;
    }

    /* Let's re-write the tramp to go to ftrace_[regs_]caller */
    if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
        ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
    else
        ptr = ppc_global_function_entry((void *)ftrace_caller);

    if (patch_branch((u32 *)tramp, ptr, 0)) {
        pr_debug("REL24 out of range!\n");
        return -1;
    }

    /* Remember this tramp so call sites can reuse it */
    if (add_ftrace_tramp(tramp)) {
        pr_debug("No tramp locations left\n");
        return -1;
    }

    return 0;
}
0302 
/*
 * Nop out the ftrace call site at rec->ip in core kernel text.
 *
 * The "bl" found there should target a compiler generated long_branch
 * trampoline; try to convert that trampoline via
 * setup_mcount_compiler_tramp(), and only insist on another reachable
 * trampoline if the conversion fails.  'addr' is unused here beyond
 * the common prototype.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
    unsigned long tramp, ip = rec->ip;
    ppc_inst_t op;

    /* Read where this goes */
    if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
        pr_err("Fetching opcode failed.\n");
        return -EFAULT;
    }

    /* Make sure that this is still a 24bit jump */
    if (!is_bl_op(op)) {
        pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
        return -EINVAL;
    }

    /* Let's find where the pointer goes */
    tramp = find_bl_target(ip, op);

    pr_devel("ip:%lx jumps to %lx", ip, tramp);

    if (setup_mcount_compiler_tramp(tramp)) {
        /* Are other trampolines reachable? */
        if (!find_ftrace_tramp(ip)) {
            pr_err("No ftrace trampolines reachable from %ps\n",
                    (void *)ip);
            return -EINVAL;
        }
    }

    if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
        pr_err("Patching NOP failed.\n");
        return -EPERM;
    }

    return 0;
}
0341 
/*
 * Called by ftrace core to turn the call at rec->ip (currently calling
 * 'addr') into a nop.  Dispatches on whether the site can reach 'addr'
 * with a 24-bit branch, lies in core kernel text, or lives in a module.
 */
int ftrace_make_nop(struct module *mod,
            struct dyn_ftrace *rec, unsigned long addr)
{
    unsigned long ip = rec->ip;
    ppc_inst_t old, new;

    /*
     * If the calling address is more that 24 bits away,
     * then we had to use a trampoline to make the call.
     * Otherwise just update the call site.
     */
    if (test_24bit_addr(ip, addr)) {
        /* within range */
        old = ftrace_call_replace(ip, addr, 1);
        new = ppc_inst(PPC_RAW_NOP());
        return ftrace_modify_code(ip, old, new);
    } else if (core_kernel_text(ip)) {
        return __ftrace_make_nop_kernel(rec, addr);
    } else if (!IS_ENABLED(CONFIG_MODULES)) {
        /* out of range and no modules: nothing this can be */
        return -EINVAL;
    }

    /*
     * Out of range jumps are called from modules.
     * We should either already have a pointer to the module
     * or it has been passed in.
     */
    if (!rec->arch.mod) {
        if (!mod) {
            pr_err("No module loaded addr=%lx\n", addr);
            return -EFAULT;
        }
        rec->arch.mod = mod;
    } else if (mod) {
        if (mod != rec->arch.mod) {
            pr_err("Record mod %p not equal to passed in mod %p\n",
                   rec->arch.mod, mod);
            return -EINVAL;
        }
        /* nothing to do if mod == rec->arch.mod */
    } else
        mod = rec->arch.mod;

    return __ftrace_make_nop(mod, rec, addr);
}
0387 
0388 #ifdef CONFIG_MODULES
0389 /*
0390  * Examine the existing instructions for __ftrace_make_call.
0391  * They should effectively be a NOP, and follow formal constraints,
0392  * depending on the ABI. Return false if they don't.
0393  */
0394 static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
0395 {
0396     if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
0397         return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
0398     else
0399         return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
0400                ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
0401 }
0402 
/*
 * Convert the nop sequence at the module call site rec->ip into a
 * "bl" to the module's ftrace trampoline for 'addr'.
 *
 * Returns 0 on success, -EFAULT on text or trampoline access failure,
 * -EINVAL on an unexpected instruction sequence, a missing trampoline,
 * a trampoline/addr mismatch, or an out-of-range branch.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
    ppc_inst_t op[2];
    void *ip = (void *)rec->ip;
    unsigned long entry, ptr, tramp;
    struct module *mod = rec->arch.mod;

    /* read where this goes */
    if (copy_inst_from_kernel_nofault(op, ip))
        return -EFAULT;

    /* the second instruction is only examined when !WITH_REGS */
    if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
        copy_inst_from_kernel_nofault(op + 1, ip + 4))
        return -EFAULT;

    if (!expected_nop_sequence(ip, op[0], op[1])) {
        pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
               ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
        return -EINVAL;
    }

    /* If we never set up ftrace trampoline(s), then bail */
    if (!mod->arch.tramp ||
        (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
        pr_err("No ftrace trampoline\n");
        return -EINVAL;
    }

    /* Pick the regs-saving trampoline when the record asks for it */
    if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
        tramp = mod->arch.tramp_regs;
    else
        tramp = mod->arch.tramp;

    if (module_trampoline_target(mod, tramp, &ptr)) {
        pr_err("Failed to get trampoline target\n");
        return -EFAULT;
    }

    pr_devel("trampoline target %lx", ptr);

    entry = ppc_global_function_entry((void *)addr);
    /* This should match what was called */
    if (ptr != entry) {
        pr_err("addr %lx does not match expected %lx\n", ptr, entry);
        return -EINVAL;
    }

    if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
        pr_err("REL24 out of range!\n");
        return -EINVAL;
    }

    return 0;
}
0458 #else
/* Stub: without CONFIG_MODULES no module call sites exist to patch. */
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
    return 0;
}
0463 #endif /* CONFIG_MODULES */
0464 
/*
 * Convert the nop at the core-kernel call site rec->ip into a "bl" to
 * a reachable ftrace trampoline.  'addr' must resolve to
 * ftrace_caller() (or ftrace_regs_caller() when
 * DYNAMIC_FTRACE_WITH_REGS is enabled).
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
    ppc_inst_t op;
    void *ip = (void *)rec->ip;
    unsigned long tramp, entry, ptr;

    /* Make sure we're being asked to patch branch to a known ftrace addr */
    entry = ppc_global_function_entry((void *)ftrace_caller);
    ptr = ppc_global_function_entry((void *)addr);

    if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
        entry = ppc_global_function_entry((void *)ftrace_regs_caller);

    if (ptr != entry) {
        pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
        return -EINVAL;
    }

    /* Make sure we have a nop */
    if (copy_inst_from_kernel_nofault(&op, ip)) {
        pr_err("Unable to read ftrace location %p\n", ip);
        return -EFAULT;
    }

    if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
        pr_err("Unexpected call sequence at %p: %08lx\n",
               ip, ppc_inst_as_ulong(op));
        return -EINVAL;
    }

    /* Find a recorded trampoline within 24-bit branch range */
    tramp = find_ftrace_tramp((unsigned long)ip);
    if (!tramp) {
        pr_err("No ftrace trampolines reachable from %ps\n", ip);
        return -EINVAL;
    }

    if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
        pr_err("Error patching branch to ftrace tramp!\n");
        return -EINVAL;
    }

    return 0;
}
0508 
/*
 * Called by ftrace core to convert the nop at rec->ip into a call to
 * 'addr'.  Dispatches on whether the site can reach 'addr' with a
 * 24-bit branch, lies in core kernel text, or lives in a module.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
    unsigned long ip = rec->ip;
    ppc_inst_t old, new;

    /*
     * If the calling address is more that 24 bits away,
     * then we had to use a trampoline to make the call.
     * Otherwise just update the call site.
     */
    if (test_24bit_addr(ip, addr)) {
        /* within range */
        old = ppc_inst(PPC_RAW_NOP());
        new = ftrace_call_replace(ip, addr, 1);
        return ftrace_modify_code(ip, old, new);
    } else if (core_kernel_text(ip)) {
        return __ftrace_make_call_kernel(rec, addr);
    } else if (!IS_ENABLED(CONFIG_MODULES)) {
        /* We should not get here without modules */
        return -EINVAL;
    }

    /*
     * Out of range jumps are called from modules.
     * Being that we are converting from nop, it had better
     * already have a module defined.
     */
    if (!rec->arch.mod) {
        pr_err("No module loaded\n");
        return -EINVAL;
    }

    return __ftrace_make_call(rec, addr);
}
0543 
0544 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
0545 #ifdef CONFIG_MODULES
/*
 * Re-point the module call site at rec->ip from 'old_addr' to 'addr'
 * directly, without passing through a nop state.
 *
 * The current "bl" is validated against 'old_addr' (directly or via
 * its trampoline); the new target is branched to directly when in
 * range, otherwise through the module's normal or regs-saving
 * trampoline depending on rec->flags.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                    unsigned long addr)
{
    ppc_inst_t op;
    unsigned long ip = rec->ip;
    unsigned long entry, ptr, tramp;
    struct module *mod = rec->arch.mod;

    /* If we never set up ftrace trampolines, then bail */
    if (!mod->arch.tramp || !mod->arch.tramp_regs) {
        pr_err("No ftrace trampoline\n");
        return -EINVAL;
    }

    /* read where this goes */
    if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
        pr_err("Fetching opcode failed.\n");
        return -EFAULT;
    }

    /* Make sure that this is still a 24bit jump */
    if (!is_bl_op(op)) {
        pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
        return -EINVAL;
    }

    /* lets find where the pointer goes */
    tramp = find_bl_target(ip, op);
    entry = ppc_global_function_entry((void *)old_addr);

    pr_devel("ip:%lx jumps to %lx", ip, tramp);

    if (tramp != entry) {
        /* old_addr is not within range, so we must have used a trampoline */
        if (module_trampoline_target(mod, tramp, &ptr)) {
            pr_err("Failed to get trampoline target\n");
            return -EFAULT;
        }

        pr_devel("trampoline target %lx", ptr);

        /* This should match what was called */
        if (ptr != entry) {
            pr_err("addr %lx does not match expected %lx\n", ptr, entry);
            return -EINVAL;
        }
    }

    /* The new target may be within range */
    if (test_24bit_addr(ip, addr)) {
        /* within range */
        if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
            pr_err("REL24 out of range!\n");
            return -EINVAL;
        }

        return 0;
    }

    /* Out of range: go through the appropriate module trampoline */
    if (rec->flags & FTRACE_FL_REGS)
        tramp = mod->arch.tramp_regs;
    else
        tramp = mod->arch.tramp;

    if (module_trampoline_target(mod, tramp, &ptr)) {
        pr_err("Failed to get trampoline target\n");
        return -EFAULT;
    }

    pr_devel("trampoline target %lx", ptr);

    entry = ppc_global_function_entry((void *)addr);
    /* This should match what was called */
    if (ptr != entry) {
        pr_err("addr %lx does not match expected %lx\n", ptr, entry);
        return -EINVAL;
    }

    if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
        pr_err("REL24 out of range!\n");
        return -EINVAL;
    }

    return 0;
}
0632 #else
/* Stub: without CONFIG_MODULES no module call sites exist to retarget. */
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
    return 0;
}
0637 #endif
0638 
/*
 * Called by ftrace core to change the target of the call at rec->ip
 * from 'old_addr' to 'addr' (e.g. when toggling the regs-saving
 * variant), without going through a nop state.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
            unsigned long addr)
{
    unsigned long ip = rec->ip;
    ppc_inst_t old, new;

    /*
     * If the calling address is more that 24 bits away,
     * then we had to use a trampoline to make the call.
     * Otherwise just update the call site.
     */
    if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
        /* within range */
        old = ftrace_call_replace(ip, old_addr, 1);
        new = ftrace_call_replace(ip, addr, 1);
        return ftrace_modify_code(ip, old, new);
    } else if (core_kernel_text(ip)) {
        /*
         * We always patch out of range locations to go to the regs
         * variant, so there is nothing to do here
         */
        return 0;
    } else if (!IS_ENABLED(CONFIG_MODULES)) {
        /* We should not get here without modules */
        return -EINVAL;
    }

    /*
     * Out of range jumps are called from modules.
     */
    if (!rec->arch.mod) {
        pr_err("No module loaded\n");
        return -EINVAL;
    }

    return __ftrace_modify_call(rec, old_addr, addr);
}
0676 #endif
0677 
/*
 * Point the ftrace_call patch site (and, when REGS support is
 * enabled, the ftrace_regs_call site too) at the new tracer entry
 * 'func'.  Returns the result of the (last attempted) modification.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
    unsigned long ip = (unsigned long)(&ftrace_call);
    ppc_inst_t old, new;
    int ret;

    /* Replace whatever currently lives at the site, read back live */
    old = ppc_inst_read((u32 *)&ftrace_call);
    new = ftrace_call_replace(ip, (unsigned long)func, 1);
    ret = ftrace_modify_code(ip, old, new);

    /* Also update the regs callback function */
    if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
        ip = (unsigned long)(&ftrace_regs_call);
        old = ppc_inst_read((u32 *)&ftrace_regs_call);
        new = ftrace_call_replace(ip, (unsigned long)func, 1);
        ret = ftrace_modify_code(ip, old, new);
    }

    return ret;
}
0698 
0699 /*
0700  * Use the default ftrace_modify_all_code, but without
0701  * stop_machine().
0702  */
0703 void arch_ftrace_update_code(int command)
0704 {
0705     ftrace_modify_all_code(command);
0706 }
0707 
0708 #ifdef CONFIG_PPC64
0709 #define PACATOC offsetof(struct paca_struct, kernel_toc)
0710 
0711 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
0712 
0713 void ftrace_free_init_tramp(void)
0714 {
0715     int i;
0716 
0717     for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
0718         if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
0719             ftrace_tramps[i] = 0;
0720             return;
0721         }
0722 }
0723 
/*
 * Install the two ftrace trampoline stubs (ftrace_tramp_text and
 * ftrace_tramp_init) and record them in ftrace_tramps[].
 *
 * Each stub loads the kernel TOC pointer from the paca, materialises
 * the TOC-relative address of ftrace_[regs_]caller via the HA/LO
 * fixups patched into stub_insns[1] and [2], and branches there via
 * CTR.  Fails with -1 if the target lies more than 2GB from the
 * kernel TOC.
 */
int __init ftrace_dyn_arch_init(void)
{
    int i;
    unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
    u32 stub_insns[] = {
        PPC_RAW_LD(_R12, _R13, PACATOC),
        PPC_RAW_ADDIS(_R12, _R12, 0),
        PPC_RAW_ADDI(_R12, _R12, 0),
        PPC_RAW_MTCTR(_R12),
        PPC_RAW_BCTR()
    };
    unsigned long addr;
    long reladdr;

    if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
        addr = ppc_global_function_entry((void *)ftrace_regs_caller);
    else
        addr = ppc_global_function_entry((void *)ftrace_caller);

    reladdr = addr - kernel_toc_addr();

    /* The ADDIS/ADDI pair can only encode a +/-2GB displacement */
    if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
        pr_err("Address of %ps out of range of kernel_toc.\n",
                (void *)addr);
        return -1;
    }

    for (i = 0; i < 2; i++) {
        memcpy(tramp[i], stub_insns, sizeof(stub_insns));
        tramp[i][1] |= PPC_HA(reladdr);
        tramp[i][2] |= PPC_LO(reladdr);
        add_ftrace_tramp((unsigned long)tramp[i]);
    }

    return 0;
}
0760 #endif
0761 
0762 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0763 
0764 extern void ftrace_graph_call(void);
0765 extern void ftrace_graph_stub(void);
0766 
/*
 * Re-write the branch at ftrace_graph_call to enter
 * ftrace_graph_caller (enable) or the fall-through ftrace_graph_stub
 * (disable).  A no-op under DYNAMIC_FTRACE_WITH_ARGS, where
 * ftrace_graph_func() is used instead (see below).
 */
static int ftrace_modify_ftrace_graph_caller(bool enable)
{
    unsigned long ip = (unsigned long)(&ftrace_graph_call);
    unsigned long addr = (unsigned long)(&ftrace_graph_caller);
    unsigned long stub = (unsigned long)(&ftrace_graph_stub);
    ppc_inst_t old, new;

    if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
        return 0;

    /* Swap the branch target between the stub and the real caller */
    old = ftrace_call_replace(ip, enable ? stub : addr, 0);
    new = ftrace_call_replace(ip, enable ? addr : stub, 0);

    return ftrace_modify_code(ip, old, new);
}
0782 
/* Enable the function-graph entry hook by patching ftrace_graph_call. */
int ftrace_enable_ftrace_graph_caller(void)
{
    return ftrace_modify_ftrace_graph_caller(true);
}
0787 
/* Disable the function-graph entry hook by restoring the stub branch. */
int ftrace_disable_ftrace_graph_caller(void)
{
    return ftrace_modify_ftrace_graph_caller(false);
}
0792 
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 *
 * Returns the (possibly diverted) value to install as the traced
 * function's return address: return_to_handler on success, or the
 * original 'parent' when tracing is dead, paused, recursing, or
 * function_graph_enter() rejects the entry.
 */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
    unsigned long return_hooker;
    int bit;

    if (unlikely(ftrace_graph_is_dead()))
        goto out;

    if (unlikely(atomic_read(&current->tracing_graph_pause)))
        goto out;

    /* Guard against re-entering the tracer from within itself */
    bit = ftrace_test_recursion_trylock(ip, parent);
    if (bit < 0)
        goto out;

    return_hooker = ppc_function_entry(return_to_handler);

    /* Divert the return only if graph entry was accepted */
    if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
        parent = return_hooker;

    ftrace_test_recursion_unlock(bit);
out:
    return parent;
}
0822 
0823 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * ARGS variant of the graph entry hook: rewrite the saved link
 * register in fregs so the traced function returns through
 * return_to_handler (when __prepare_ftrace_return() diverts it).
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
               struct ftrace_ops *op, struct ftrace_regs *fregs)
{
    fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
0829 #else
/* Thin wrapper around __prepare_ftrace_return() for the non-ARGS entry path. */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
                    unsigned long sp)
{
    return __prepare_ftrace_return(parent, ip, sp);
}
0835 #endif
0836 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
0837 
0838 #ifdef CONFIG_PPC64_ELF_ABI_V1
/*
 * ELF ABI v1 text symbols carry a leading dot; skip it when the
 * search pattern does not start with one, so the names can match.
 */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
    int skip_dot = (str[0] == '.' && search[0] != '.');

    return skip_dot ? str + 1 : str;
}
0846 #endif /* CONFIG_PPC64_ELF_ABI_V1 */