// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright (C) Huawei Inc., 2014
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
/* for arm_gen_branch() */
#include <asm/insn.h>
/* for patch_text() */
#include <asm/patch.h>

#include "core.h"
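/*
 * See register_usage_flags. If the probed instruction doesn't use PC,
 * we can copy it into the template and have it executed directly
 * without simulation or emulation.
 */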
#define ARM_REG_PC	15
#define can_kprobe_direct_exec(m)	(!test_bit(ARM_REG_PC, &(m)))
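/*
 * NOTE: the first 'sub sp' and the 'add r3' instructions below are
 * rewritten by arch_prepare_optimized_kprobe() according to the stack
 * cost of the probed instruction. The offsets #52, #60 and #64 are
 * ARM_sp, ARM_pc and ARM_cpsr within struct pt_regs.
 */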
asm (
	".global optprobe_template_entry\n"
	"optprobe_template_entry:\n"
	".global optprobe_template_sub_sp\n"
	"optprobe_template_sub_sp:"
	"	sub	sp, sp, #0xff\n"
	"	stmia	sp, {r0 - r14}\n"
	".global optprobe_template_add_sp\n"
	"optprobe_template_add_sp:"
	"	add	r3, sp, #0xff\n"
	"	str	r3, [sp, #52]\n"
	"	mrs	r4, cpsr\n"
	"	str	r4, [sp, #64]\n"
	"	mov	r1, sp\n"
	"	ldr	r0, 1f\n"
	"	ldr	r2, 2f\n"
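	/*
	 * AEABI requires an 8-byte aligned stack. SP % 4 == 0 should
	 * already be ensured, so round SP down to an 8-byte boundary
	 * if needed.
	 */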
	"	and	r4, sp, #4\n"
	"	sub	sp, sp, r4\n"
#if __LINUX_ARM_ARCH__ >= 5
	"	blx	r2\n"
#else
	"	mov	lr, pc\n"
	"	mov	pc, r2\n"
#endif
	"	add	sp, sp, r4\n"
	"	ldr	r1, [sp, #64]\n"
	"	tst	r1, #"__stringify(PSR_T_BIT)"\n"
	"	ldrne	r2, [sp, #60]\n"
	"	orrne	r2, #1\n"
	"	strne	r2, [sp, #60]	@ set bit0 of PC for thumb\n"
	"	msr	cpsr_cxsf, r1\n"
	".global optprobe_template_restore_begin\n"
	"optprobe_template_restore_begin:\n"
	"	ldmia	sp, {r0 - r15}\n"
	".global optprobe_template_restore_orig_insn\n"
	"optprobe_template_restore_orig_insn:\n"
	"	nop\n"
	".global optprobe_template_restore_end\n"
	"optprobe_template_restore_end:\n"
	"	nop\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	"1:	.long 0\n"
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	"2:	.long 0\n"
	".global optprobe_template_end\n"
	"optprobe_template_end:\n");

#define TMPL_VAL_IDX \
	((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
#define TMPL_END_IDX \
	((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
#define TMPL_ADD_SP \
	((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_SUB_SP \
	((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_BEGIN \
	((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_ORIG_INSN \
	((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_END \
	((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
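/*
 * Return non-zero once the instruction slot for this probe has been
 * set up by arch_prepare_optimized_kprobe().
 */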
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

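/*
 * In the ARM ISA an optimized kprobe replaces exactly one 4-byte,
 * 4-byte-aligned instruction, so another kprobe cannot overlap the
 * replaced range; there is nothing to check here.
 */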
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

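/*
 * ARM can normally optimize an instruction, except when its stack cost
 * cannot be determined statically (e.g. a store whose offset comes from
 * a register, such as 'str r0, [sp, r1]').
 */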
static int can_optimize(struct kprobe *kp)
{
	if (kp->ainsn.stack_space < 0)
		return 0;
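	/*
	 * 255 is the largest immediate that fits the patched
	 * 'sub sp, sp, #<imm>'; anything larger would need a
	 * different encoding.
	 */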
	if (kp->ainsn.stack_space > 255 - sizeof(struct pt_regs))
		return 0;
	return 1;
}
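/* Free the optimized instruction slot. */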
static void
__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
	}
}

extern void kprobe_handler(struct pt_regs *regs);

static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	unsigned long flags;
	struct kprobe *p = &op->kp;
	struct kprobe_ctlblk *kcb;
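
	/* Fill in the registers the template skipped: pc and ORIG_r0. */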
	regs->ARM_pc = (unsigned long)op->kp.addr;
	regs->ARM_ORIG_r0 = ~0UL;

	local_irq_save(flags);
	kcb = get_kprobe_ctlblk();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
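
	/*
	 * Single-step the replaced instruction only when it cannot be
	 * executed directly during restore.
	 */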
	if (!p->ainsn.kprobe_direct_exec)
		op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);

	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback)

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
{
	kprobe_opcode_t *code;
	unsigned long rel_chk;
	unsigned long val;
	unsigned long stack_protect = sizeof(struct pt_regs);

	if (!can_optimize(orig))
		return -EILSEQ;

	code = get_optinsn_slot();
	if (!code)
		return -ENOMEM;

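	/*
	 * Verify that the address gap is within 32MiB, because a
	 * relative 'b' instruction is used to reach optinsn.insn.
	 * The ARM branch encoding is:
	 *
	 *   31  28 27           24 23             0
	 *  +------+---+---+---+---+----------------+
	 *  | cond | 1 | 0 | 1 | 0 |      imm24     |
	 *  +------+---+---+---+---+----------------+
	 *
	 * imm24 is a signed 24-bit integer and the real branch offset
	 * is imm32 = SignExtend(imm24:'00', 32); the '+ 8' below
	 * accounts for the PC reading two instructions ahead.
	 *
	 * So the maximum forward branch is:
	 *   (0x007fffff << 2) = 0x01fffffc
	 * and the maximum backward branch is:
	 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
	 *
	 * We can simply check (rel & 0xfe000003):
	 *  if rel is positive, (rel & 0xfe000000) should be 0;
	 *  if rel is negative, (rel & 0xfe000000) should be 0xfe000000;
	 *  the low '3' checks 4-byte alignment.
	 */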
	rel_chk = (unsigned long)((long)code -
			(long)orig->addr + 8) & 0xfe000003;

	if ((rel_chk != 0) && (rel_chk != 0xfe000000)) {
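		/*
		 * Unlike x86, free the code buffer directly instead of
		 * calling __arch_remove_optimized_kprobe(), because no
		 * field of op has been filled in yet.
		 */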
		free_optinsn_slot(code, 0);
		return -ERANGE;
	}
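	/* Copy the arch-specific template into the slot. */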
	memcpy(code, (unsigned long *)optprobe_template_entry,
			TMPL_END_IDX * sizeof(kprobe_opcode_t));

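	/* Adjust the buffer to the probed instruction's stack cost. */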
	BUG_ON(orig->ainsn.stack_space < 0);

	stack_protect += orig->ainsn.stack_space;

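	/* Anything larger should have been rejected by can_optimize(). */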
	BUG_ON(stack_protect > 255);

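	/* Patch in 'sub sp, sp, #<stack_protect>'. */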
	code[TMPL_SUB_SP] = __opcode_to_mem_arm(0xe24dd000 | stack_protect);
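	/* Patch in 'add r3, sp, #<stack_protect>'. */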
	code[TMPL_ADD_SP] = __opcode_to_mem_arm(0xe28d3000 | stack_protect);

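	/* Store the probe pointer where the template loads r0 from. */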
	val = (unsigned long)op;
	code[TMPL_VAL_IDX] = val;

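	/* Store the callback address where the template loads r2 from. */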
	val = (unsigned long)optimized_callback;
	code[TMPL_CALL_IDX] = val;

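	/* If possible, copy the insn and have it executed directly. */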
	orig->ainsn.kprobe_direct_exec = false;
	if (can_kprobe_direct_exec(orig->ainsn.register_usage_flags)) {
		kprobe_opcode_t final_branch = arm_gen_branch(
				(unsigned long)(&code[TMPL_RESTORE_END]),
				(unsigned long)(op->kp.addr) + 4);
		if (final_branch != 0) {
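			/*
			 * Replace the template's 'ldmia sp, {r0 - r15}'
			 * with 'ldmia sp, {r0 - r14}' so that every
			 * register except pc is restored.
			 */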
			code[TMPL_RESTORE_BEGIN] = __opcode_to_mem_arm(0xe89d7fff);

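			/* The original probed instruction. */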
			code[TMPL_RESTORE_ORIG_INSN] = __opcode_to_mem_arm(orig->opcode);

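			/* Branch back to the instruction after the probe point. */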
			code[TMPL_RESTORE_END] = __opcode_to_mem_arm(final_branch);
			orig->ainsn.kprobe_direct_exec = true;
		}
	}

	flush_icache_range((unsigned long)code,
			   (unsigned long)(&code[TMPL_END_IDX]));
	op->optinsn.insn = code;
	return 0;
}

void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		unsigned long insn;
		WARN_ON(kprobe_disabled(&op->kp));
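
		/*
		 * Back up the instruction that is about to be replaced
		 * by the branch.
		 */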
		memcpy(op->optinsn.copied_insn, op->kp.addr,
				RELATIVEJUMP_SIZE);

		insn = arm_gen_branch((unsigned long)op->kp.addr,
				(unsigned long)op->optinsn.insn);
		BUG_ON(insn == 0);
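
		/*
		 * Make the branch conditional if the replaced
		 * instruction is conditional.
		 */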
		insn = (__mem_to_opcode_arm(
			op->optinsn.copied_insn[0]) & 0xf0000000) |
			(insn & 0x0fffffff);
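
		/*
		 * Similar to __arch_disarm_kprobe(), operations that
		 * remove breakpoints must be wrapped by stop_machine()
		 * to avoid racing.
		 */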
		kprobes_remove_breakpoint(op->kp.addr, insn);

		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}
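/*
 * Recover the original instructions and breakpoints from the relative
 * jumps. The caller must hold kprobe_mutex.
 */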
void arch_unoptimize_kprobes(struct list_head *oplist,
			    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}