// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount -mfentry
 * flags (since gcc 9 / clang 10) is used.
 * In either case the original and the disabled function prologue contain
 * only a single six-byte instruction and look like this:
 * >    brcl    0,0         # offset 0
 * To enable ftrace the instruction gets patched and afterwards looks
 * like this:
 * >    brasl   %r0,ftrace_caller   # offset 0
 *
 * The instruction is patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
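
/*
 * With the hotpatch trampolines below, enabling and disabling only flip
 * the mask field in the second instruction byte; the constants used in
 * this file (orig[], 0xc004, 0xc0f4) encode exactly that:
 *
 * >    c0 04 xx xx xx xx   # brcl 0,<trampoline>   (disabled: nop)
 * >    c0 f4 xx xx xx xx   # brcl 15,<trampoline>  (enabled: branch)
 *
 * The xx bytes are a signed 32-bit displacement counted in halfwords,
 * filled in once by ftrace_init_nop() to reach the per-function trampoline.
 */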

void *ftrace_func __read_mostly = ftrace_stub;
struct ftrace_insn {
    u16 opc;
    s32 disp;
} __packed;
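
/*
 * struct ftrace_insn mirrors the six-byte brcl/brasl format: a 16-bit
 * opcode (including the mask field) followed by a 32-bit displacement.
 * The displacement is counted in halfwords, hence the "* 2" and "/ 2"
 * conversions wherever it is turned into or derived from a byte offset.
 */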

asm(
    "   .align 16\n"
    "ftrace_shared_hotpatch_trampoline_br:\n"
    "   lmg %r0,%r1,2(%r1)\n"
    "   br  %r1\n"
    "ftrace_shared_hotpatch_trampoline_br_end:\n"
);
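
/*
 * On entry %r1 holds the brasl return address, i.e. the address of the
 * per-function trampoline + 6. Assuming the struct ftrace_hotpatch_trampoline
 * layout from ftrace.h (a 2-byte pad after the brasl), the lmg then loads
 * from trampoline + 8: %r0 gets the return address into the traced function
 * and %r1 gets the interceptor (ftrace_caller), matching the ABI described
 * above where r0 carries the return address.
 */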

#ifdef CONFIG_EXPOLINE
asm(
    "   .align 16\n"
    "ftrace_shared_hotpatch_trampoline_exrl:\n"
    "   lmg %r0,%r1,2(%r1)\n"
    "   exrl    %r0,0f\n"
    "   j   .\n"
    "0: br  %r1\n"
    "ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */
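
/*
 * Expoline variant: instead of executing the indirect br %r1 in place,
 * exrl executes the branch at label 0 as an execute-type instruction
 * (%r0 as the register field means "no modification"). Presumably this
 * keeps the indirect branch out of the ordinary prediction path, like the
 * other expoline thunks, which is why it is preferred unless nospec_disable
 * is set. The j . is never reached when the branch is taken.
 */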

#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
    const char *tstart, *tend;

    tstart = ftrace_shared_hotpatch_trampoline_br;
    tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
    if (!nospec_disable) {
        tstart = ftrace_shared_hotpatch_trampoline_exrl;
        tend = ftrace_shared_hotpatch_trampoline_exrl_end;
    }
#endif /* CONFIG_EXPOLINE */
    if (end)
        *end = tend;
    return tstart;
}

bool ftrace_need_init_nop(void)
{
    return true;
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
    static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
        __ftrace_hotpatch_trampolines_start;
    static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
    static struct ftrace_hotpatch_trampoline *trampoline;
    struct ftrace_hotpatch_trampoline **next_trampoline;
    struct ftrace_hotpatch_trampoline *trampolines_end;
    struct ftrace_hotpatch_trampoline tmp;
    struct ftrace_insn *insn;
    const char *shared;
    s32 disp;

    BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
             SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

    next_trampoline = &next_vmlinux_trampoline;
    trampolines_end = __ftrace_hotpatch_trampolines_end;
    shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
    if (mod) {
        next_trampoline = &mod->arch.next_trampoline;
        trampolines_end = mod->arch.trampolines_end;
        shared = ftrace_plt;
    }
#endif

    if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
        return -ENOMEM;
    trampoline = (*next_trampoline)++;

    /* Check for the compiler-generated fentry nop (brcl 0, .). */
    if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
        return -EINVAL;

    /* Generate the trampoline. */
    tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
    tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
    tmp.interceptor = FTRACE_ADDR;
    tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
    s390_kernel_write(trampoline, &tmp, sizeof(tmp));
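
    /*
     * The trampoline now reads, schematically (field order assumed from
     * struct ftrace_hotpatch_trampoline in ftrace.h, 2-byte pad omitted):
     *
     * >    brasl   %r1,<shared trampoline>
     * >    .quad   <rec->ip + 6>   # return address into the function
     * >    .quad   FTRACE_ADDR     # interceptor, i.e. ftrace_caller
     */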

    /* Generate a jump to the trampoline. */
    disp = ((char *)trampoline - (char *)rec->ip) / 2;
    insn = (struct ftrace_insn *)rec->ip;
    s390_kernel_write(&insn->disp, &disp, sizeof(disp));
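    /*
     * Only the displacement is written here; the opcode halfword keeps
     * mask 0, so the prologue remains a nop (brcl 0,<trampoline>) until
     * ftrace_make_call() flips the mask to 0xf.
     */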

    return 0;
}

static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
    struct ftrace_hotpatch_trampoline *trampoline;
    struct ftrace_insn insn;
    s64 disp;
    u16 opc;

    if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
        return ERR_PTR(-EFAULT);
    disp = (s64)insn.disp * 2;
    trampoline = (void *)(rec->ip + disp);
    if (get_kernel_nofault(opc, &trampoline->brasl_opc))
        return ERR_PTR(-EFAULT);
    if (opc != 0xc015)
        return ERR_PTR(-EINVAL);
    return trampoline;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
               unsigned long addr)
{
    struct ftrace_hotpatch_trampoline *trampoline;
    u64 old;

    trampoline = ftrace_get_trampoline(rec);
    if (IS_ERR(trampoline))
        return PTR_ERR(trampoline);
    if (get_kernel_nofault(old, &trampoline->interceptor))
        return -EFAULT;
    if (old != old_addr)
        return -EINVAL;
    s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
    return 0;
}

static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
    u16 old;
    u8 op;

    if (get_kernel_nofault(old, addr))
        return -EFAULT;
    if (old != expected)
        return -EINVAL;
    /* set mask field to all ones or zeroes */
    op = enable ? 0xf4 : 0x04;
    s390_kernel_write((char *)addr + 1, &op, sizeof(op));
    return 0;
}
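
/*
 * Worked example: for a traced function prologue the expected halfword is
 * 0xc004 (brcl 0,...); writing 0xf4 over byte 1 yields 0xc0f4 (brcl 15,...),
 * an unconditional branch, and writing 0x04 restores the nop. Only one byte
 * is stored, so the displacement is never touched and concurrent instruction
 * fetch sees either the old or the new mask, never a torn instruction.
 */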

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
            unsigned long addr)
{
    /* Expect brcl 0xf,... */
    return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
    struct ftrace_hotpatch_trampoline *trampoline;

    trampoline = ftrace_get_trampoline(rec);
    if (IS_ERR(trampoline))
        return PTR_ERR(trampoline);
    s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
    /* Expect brcl 0x0,... */
    return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}
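
/*
 * Note the ordering in ftrace_make_call(): the trampoline's interceptor
 * slot is updated before the branch mask is enabled, so the prologue never
 * branches to a trampoline with a stale target. text_poke_sync_lock() in
 * ftrace_arch_code_modify_post_process() below then makes the new code
 * visible on all CPUs.
 */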

int ftrace_update_ftrace_func(ftrace_func_t func)
{
    ftrace_func = func;
    return 0;
}

void arch_ftrace_update_code(int command)
{
    ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
    /*
     * Flush any pre-fetched instructions on all
     * CPUs to make the new code visible.
     */
    text_poke_sync_lock();
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
    const char *start, *end;

    ftrace_plt = module_alloc(PAGE_SIZE);
    if (!ftrace_plt)
        panic("cannot allocate ftrace plt\n");

    start = ftrace_shared_hotpatch_trampoline(&end);
    memcpy(ftrace_plt, start, end - start);
    set_memory_ro((unsigned long)ftrace_plt, 1);
    return 0;
}
device_initcall(ftrace_plt_init);
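
/*
 * ftrace_plt is a copy of the shared trampoline placed in module address
 * space. Per-function trampolines in modules brasl to this copy instead of
 * the vmlinux one, presumably to keep the signed 32-bit halfword
 * displacement (a reach of +-4 GiB) within range, as the header comment
 * notes.
 */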

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
                    unsigned long ip)
{
    if (unlikely(ftrace_graph_is_dead()))
        goto out;
    if (unlikely(atomic_read(&current->tracing_graph_pause)))
        goto out;
    ip -= MCOUNT_INSN_SIZE;
    if (!function_graph_enter(ra, ip, 0, (void *) sp))
        ra = (unsigned long) return_to_handler;
out:
    return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition (brc). To enable the ftrace graph
 * code block, we patch the mask field of the instruction to zero, which
 * turns the instruction into a nop.
 * To disable the ftrace graph code the mask field is patched to all ones,
 * which turns the instruction into an unconditional branch.
 */
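/*
 * In byte terms this is the same single-byte mask flip as above, only on
 * the four-byte brc instruction: 0xa7f4 (brc 15,...) is the disabled state,
 * an always-taken branch around the graph code, and 0xa704 (brc 0,...) is
 * the enabled state, a nop that falls through into it.
 */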
int ftrace_enable_ftrace_graph_caller(void)
{
    int rc;

    /* Expect brc 0xf,... */
    rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
    if (rc)
        return rc;
    text_poke_sync_lock();
    return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
    int rc;

    /* Expect brc 0x0,... */
    rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
    if (rc)
        return rc;
    text_poke_sync_lock();
    return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
        struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
    struct kprobe_ctlblk *kcb;
    struct pt_regs *regs;
    struct kprobe *p;
    int bit;

    bit = ftrace_test_recursion_trylock(ip, parent_ip);
    if (bit < 0)
        return;

    regs = ftrace_get_regs(fregs);
    p = get_kprobe((kprobe_opcode_t *)ip);
    if (!regs || unlikely(!p) || kprobe_disabled(p))
        goto out;

    if (kprobe_running()) {
        kprobes_inc_nmissed_count(p);
        goto out;
    }

    __this_cpu_write(current_kprobe, p);

    kcb = get_kprobe_ctlblk();
    kcb->kprobe_status = KPROBE_HIT_ACTIVE;

    instruction_pointer_set(regs, ip);

    if (!p->pre_handler || !p->pre_handler(p, regs)) {

        instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

        if (unlikely(p->post_handler)) {
            kcb->kprobe_status = KPROBE_HIT_SSDONE;
            p->post_handler(p, regs, 0);
        }
    }
    __this_cpu_write(current_kprobe, NULL);
out:
    ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

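/*
 * With KPROBES_ON_FTRACE the probe fires from the ftrace handler above
 * rather than via a breakpoint, so no out-of-line single-step slot is
 * needed; presumably that is why ainsn.insn can simply stay NULL here.
 */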
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
    p->ainsn.insn = NULL;
    return 0;
}
#endif