// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

#define TMPL_CALL_HDLR_IDX	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		(optprobe_template_end - optprobe_template_entry)

static bool insn_page_in_use;

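/*
 * A single, statically reserved slot (optinsn_slot) backs the detour
 * buffers for optimized probes; it is handed out at most once at a time.
 */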
void *alloc_optinsn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

void free_optinsn_page(void *page)
{
	insn_page_in_use = false;
}

/*
 * Check if we can optimize this probe. Returns the post-emulation NIP
 * if it can be optimized, and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;
	unsigned long addr = (unsigned long)p->addr;

	/*
	 * A kprobe placed on the kretprobe trampoline sits on a 'nop'
	 * instruction, which can always be emulated, so no further
	 * checks are needed.
	 */
	if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
		return addr + sizeof(kprobe_opcode_t);

	/*
	 * Only kernel text addresses are supported; kprobes placed at
	 * module addresses are not optimized.
	 */
	if (!is_kernel_addr(addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Conditional branches are not optimized: with a dummy pt_regs we
	 * cannot predict the post-emulation NIP, and so cannot guarantee
	 * that the branch back from the detour buffer stays within branch
	 * range. Only instructions that analyse_instr() can fully emulate
	 * are accepted.
	 */
	if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
	    analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

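/*
 * Called from the detour buffer in place of the trap-based kprobe path;
 * runs the probe's pre-handler with preemption disabled.
 */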
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs_set_return_ip(regs, (unsigned long)op->kp.addr);
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

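/*
 * Generate instructions to load the provided 32-bit immediate into
 * 'reg' (a lis/ori pair) and patch them at 'addr'.
 */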
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

/*
 * Generate instructions to load the provided 64-bit immediate into
 * 'reg' and patch them at 'addr'.
 */
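/*
 * For example, loading 0x1234567890abcdef into r3 would emit roughly:
 *	lis	r3, 0x1234
 *	ori	r3, r3, 0x5678
 *	sldi	r3, r3, 32
 *	oris	r3, r3, 0x90ab
 *	ori	r3, r3, 0xcdef
 */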
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	if (IS_ENABLED(CONFIG_PPC64))
		patch_imm64_load_insns(val, reg, addr);
	else
		patch_imm32_load_insns(val, reg, addr);
}

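/*
 * Build the detour buffer for an optimized kprobe: copy the assembly
 * template, embed the pointer to the optimized_kprobe and the original
 * instruction, and wire up the calls to optimized_callback() and
 * emulate_step() plus the branch back to the probed code.
 */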
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	ppc_inst_t branch_op_callback, branch_emulate_step, temp;
	unsigned long op_callback_addr, emulate_step_addr;
	kprobe_opcode_t *buff;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate an instruction slot for the detour buffer */
	buff = get_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to reach optinsn.insn. The branch
	 * target is encoded in a 24-bit immediate field, so the detour
	 * buffer must lie within +/- 32MB of the probed address.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* The branch back from the detour buffer must also be in range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Copy the detour-buffer template, one instruction at a time */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fix up the template:
	 * 1. load the address of this optimized_kprobe into r3, so that
	 *    it is passed to optimized_callback()
	 */
	patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
			   op_callback_addr, BRANCH_SET_LINK);

	rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
			    emulate_step_addr, BRANCH_SET_LINK);

	if (rc)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into r4
	 */
	temp = ppc_inst_read(p->ainsn.insn);
	patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from the trampoline to the instruction following
	 *    the probed one (the post-emulation NIP)
	 */
	patch_branch(buff + TMPL_RET_IDX, nip, 0);

	flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_optinsn_slot(buff, 0);
	return -ERANGE;

}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces a single, 4-byte aligned,
 * 4-byte long instruction, so another kprobe cannot fall within the
 * replaced range; there is nothing to check here.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	ppc_inst_t instr;
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the original instruction before it is replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
		create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
		patch_instruction(op->kp.addr, instr);
		list_del_init(&op->list);
	}
}

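/* Unoptimize by re-arming the regular (trap-based) kprobe at the site */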
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

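/*
 * An address lies within this optimized kprobe if it falls inside the
 * RELATIVEJUMP_SIZE bytes replaced at the probe site.
 */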
int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}