// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

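/*
 * Set while text_mutex is held between ftrace_arch_code_modify_prepare() and
 * ftrace_arch_code_modify_post_process(), so that code patches are queued via
 * text_poke_queue() and flushed together instead of using text_poke_early().
 */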
static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_from_kernel_nofault(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when the .init sections
 * are still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code(), so any other caller of
	 * ftrace_make_nop() with a non-MCOUNT_ADDR address is a bug.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called: x86 overrides ftrace_replace_code(), which is the
 * generic path that would otherwise modify an existing call site in place.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

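/*
 * Batch-update all ftrace call sites: first verify that every record still
 * contains the expected old instruction, then queue the new instructions and
 * flush them all with a single text_poke_finish().
 */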
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>

static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif /* CONFIG_MODULES */

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

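/*
 * Size of the trampoline's return sequence: a 5-byte jmp to the return thunk
 * when retpolines are enabled, otherwise a 1-byte ret, plus a trailing int3
 * when straight-line-speculation mitigation (CONFIG_SLS) is enabled.
 */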
#define RET_SIZE	(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough space to store the ftrace_caller code, the
	 * return sequence, as well as the address of the ftrace_ops this
	 * trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */
	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* ALLOC_TRAMP flags lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

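/*
 * Return the offset of the patched call instruction (ftrace_call or
 * ftrace_regs_call) from the start of the corresponding default trampoline.
 */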
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

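/*
 * Create the ops' trampoline on first use, or re-point the call in an already
 * allocated trampoline at the ftrace_ops' current callback function.
 */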
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it does not
 * have its own trampoline.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

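/* Free a trampoline that was previously created by create_trampoline() */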
void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

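/*
 * Rewrite the instruction at @ip with a jmp to @func, using the
 * breakpoint-based text_poke_bp() since other CPUs may be executing the code.
 */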
static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
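/*
 * Graph tracer entry used when DYNAMIC_FTRACE_WITH_ARGS is available: recover
 * the location of the return address from the saved stack pointer and hand it
 * to prepare_ftrace_return(), like the non-args entry point does.
 */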
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */