0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/uaccess.h>
0013 #include <linux/init.h>
0014 #include <linux/ftrace.h>
0015 #include <linux/syscalls.h>
0016
0017 #include <asm/asm.h>
0018 #include <asm/asm-offsets.h>
0019 #include <asm/cacheflush.h>
0020 #include <asm/syscall.h>
0021 #include <asm/uasm.h>
0022 #include <asm/unistd.h>
0023
0024 #include <asm-generic/sections.h>
0025
0026 #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
0027 #define MCOUNT_OFFSET_INSNS 5
0028 #else
0029 #define MCOUNT_OFFSET_INSNS 4
0030 #endif
0031
0032 #ifdef CONFIG_DYNAMIC_FTRACE
0033
0034
/*
 * Batch-apply ftrace code modifications for @command.
 * MIPS needs no arch-specific fixups around the update, so this simply
 * forwards to the generic ftrace_modify_all_code() implementation.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
0039
0040 #define JAL 0x0c000000
0041 #define ADDR_MASK 0x03ffffff
0042 #define JUMP_RANGE_MASK ((1UL << 28) - 1)
0043
0044 #define INSN_NOP 0x00000000
0045 #define INSN_JAL(addr) \
0046 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
0047
0048 static unsigned int insn_jal_ftrace_caller __read_mostly;
0049 static unsigned int insn_la_mcount[2] __read_mostly;
0050 static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
0051
/*
 * Pre-assemble (via uasm) the instruction templates that the dynamic
 * ftrace patching code installs later:
 *
 *   insn_la_mcount[]           - "la v1, _mcount" (used for module text,
 *                                where _mcount is out of jal range)
 *   insn_jal_ftrace_caller     - "jal ftrace_caller + 8"
 *   insn_j_ftrace_graph_caller - "j ftrace_graph_caller"
 *                                (function graph tracer only)
 */
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount  (v1 == register $3 on MIPS) */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/*
	 * jal (ftrace_caller + 8) -- presumably jumps over the first two
	 * instructions of ftrace_caller; TODO confirm against mcount.S.
	 * JUMP_RANGE_MASK keeps the target within the jal's 256MB segment.
	 */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
0072
/*
 * Replace the single instruction at @ip with @new_code and flush the
 * icache so the CPU fetches the new instruction.
 *
 * Returns 0 on success, -EFAULT if the exception-protected store faulted.
 */
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; done with fault protection */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}
0087
0088 #ifndef CONFIG_64BIT
/*
 * Patch a two-instruction sequence: @new_code1 at @ip, @new_code2 at
 * @ip + 4 (used on 32-bit kernels where the mcount call site is two
 * instructions wide -- see ftrace_make_nop()).  First word is written
 * first; use ftrace_modify_code_2r() for the reversed order.
 *
 * Returns 0 on success, -EFAULT if either store faulted.
 */
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;	/* flush both words */
	flush_icache_range(ip, ip + 8);

	return 0;
}
0108
/*
 * Like ftrace_modify_code_2() but writes the words in reverse order:
 * @new_code2 at @ip + 4 first, then @new_code1 at @ip.
 *
 * NOTE(review): the reversed order appears intended to keep the
 * two-instruction site consistent if it is executed mid-patch
 * (the first word is only switched once the second is in place) --
 * confirm against the upstream MIPS ftrace history.
 *
 * Returns 0 on success, -EFAULT if either store faulted.
 */
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}
0128 #endif
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160 #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
0161
/*
 * Turn the mcount call site of @rec into a no-op.
 *
 * Core kernel text calls _mcount with a single "jal", so a plain nop
 * suffices.  Module text (out of jal range) uses a multi-instruction
 * "la + jalr" sequence, so instead we patch in INSN_B_1F -- a branch
 * (opcode 0x10000000, i.e. beq $0, $0) over MCOUNT_OFFSET_INSNS
 * instructions -- to skip the whole sequence.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32-bit the site is two instructions wide; the second word is
	 * nopped out as well.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}
0186
/*
 * Re-enable tracing for @rec: restore the call at the mcount site.
 *
 * Core kernel text gets the pre-built "jal ftrace_caller" instruction;
 * module text gets the pre-built "la v1, _mcount" sequence (both
 * assembled in ftrace_dyn_arch_init_insns()).  On 32-bit the second
 * word of the la pair is written first via ftrace_modify_code_2r().
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
				     INSN_NOP : insn_la_mcount[1]);
#endif
}
0201
0202 #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
0203
0204 int ftrace_update_ftrace_func(ftrace_func_t func)
0205 {
0206 unsigned int new;
0207
0208 new = INSN_JAL((unsigned long)func);
0209
0210 return ftrace_modify_code(FTRACE_CALL_IP, new);
0211 }
0212
/*
 * One-time dynamic-ftrace setup: pre-assemble the patch instructions,
 * then disable tracing by nopping out the first instruction of _mcount.
 */
int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" at _mcount entry -- i.e. start disabled */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
0223 #endif
0224
0225 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0226
0227 #ifdef CONFIG_DYNAMIC_FTRACE
0228
0229 extern void ftrace_graph_call(void);
0230 #define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
0231
/*
 * Enable the function graph tracer by patching the pre-assembled
 * "j ftrace_graph_caller" instruction into the ftrace_graph_call site.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
				  insn_j_ftrace_graph_caller);
}
0237
/*
 * Disable the function graph tracer by nopping out the
 * ftrace_graph_call site again.
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
0242
0243 #endif
0244
0245 #ifndef KBUILD_MCOUNT_RA_ADDRESS
0246
0247 #define S_RA_SP (0xafbf << 16)
0248 #define S_R_SP (0xafb0 << 16)
0249 #define OFFSET_MASK 0xffff
0250
/*
 * Without KBUILD_MCOUNT_RA_ADDRESS the compiler does not tell us where
 * the caller's return address was saved, so scan the traced function's
 * prologue backwards from the mcount call site looking for the
 * "sw $ra, offset($sp)" store, and compute the stack slot from it.
 *
 * @self_ra:         address inside the traced function (its $ra at mcount)
 * @old_parent_ra:   the parent return address currently on the stack
 * @parent_ra_addr:  fallback location to return for leaf functions
 * @fp:              the traced function's stack/frame pointer
 *
 * Returns the address of the stack slot holding the parent ra, the
 * @parent_ra_addr fallback for a leaf function, or 0 on any fault or
 * verification failure.
 */
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Start scanning just before the mcount call sequence: 16 bytes
	 * back for core kernel text, 24 for modules (longer call
	 * sequence) -- presumably matching MCOUNT_OFFSET_INSNS; confirm
	 * against the -pg call-site layout.
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/* Walk backwards through the prologue stores */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;

		/*
		 * If not an "sw $rN, offset($sp)" store at all
		 * (S_R_SP == 0xafb0 << 16), the function saved nothing on
		 * the stack: treat it as a leaf and use the fallback slot.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);	/* until "sw $ra, x($sp)" */

	/* The low 16 bits of the store encode the stack offset */
	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; fault-protected */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	/* Sanity check: the slot must actually hold the parent ra */
	if (tmp == old_parent_ra)
		return sp;
	return 0;
}
0298
0299 #endif
0300
0301
0302
0303
0304
0305 void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
0306 unsigned long fp)
0307 {
0308 unsigned long old_parent_ra;
0309 unsigned long return_hooker = (unsigned long)
0310 &return_to_handler;
0311 int faulted, insns;
0312
0313 if (unlikely(ftrace_graph_is_dead()))
0314 return;
0315
0316 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
0317 return;
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338 safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
0339 if (unlikely(faulted))
0340 goto out;
0341 #ifndef KBUILD_MCOUNT_RA_ADDRESS
0342 parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
0343 old_parent_ra, (unsigned long)parent_ra_addr, fp);
0344
0345
0346
0347
0348 if (parent_ra_addr == NULL)
0349 goto out;
0350 #endif
0351
0352 safe_store_stack(return_hooker, parent_ra_addr, faulted);
0353 if (unlikely(faulted))
0354 goto out;
0355
0356
0357
0358
0359
0360
0361
0362 insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
0363 self_ra -= (MCOUNT_INSN_SIZE * insns);
0364
0365 if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
0366 *parent_ra_addr = old_parent_ra;
0367 return;
0368 out:
0369 ftrace_graph_stop();
0370 WARN_ON(1);
0371 }
0372 #endif
0373
0374 #ifdef CONFIG_FTRACE_SYSCALLS
0375
0376 #ifdef CONFIG_32BIT
/*
 * Map syscall number @nr to its handler address for the 32-bit kernel,
 * where only the O32 ABI table exists.
 */
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
0381 #endif
0382
0383 #ifdef CONFIG_64BIT
0384
/*
 * Map syscall number @nr to its handler address on 64-bit kernels,
 * checking each compiled-in ABI's number range in turn: N32 (if
 * CONFIG_MIPS32_N32), native 64-bit, then O32 compat (if
 * CONFIG_MIPS32_O32).  Numbers outside every range resolve to
 * sys_ni_syscall.
 */
unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
0400 #endif
0401
0402 #endif