Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 
0003 #define pr_fmt(fmt) "kprobes: " fmt
0004 
0005 #include <linux/kprobes.h>
0006 #include <linux/extable.h>
0007 #include <linux/slab.h>
0008 #include <linux/stop_machine.h>
0009 #include <asm/ptrace.h>
0010 #include <linux/uaccess.h>
0011 #include <asm/sections.h>
0012 #include <asm/cacheflush.h>
0013 
0014 #include "decode-insn.h"
0015 
/* Kprobe currently being handled on this CPU (NULL when none is active). */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
/* Per-CPU state tracking a kprobe across the breakpoint/single-step path. */
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* Forward declaration: finishes handling after the probed insn has executed. */
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
0021 
/*
 * Arguments handed to patch_text_cb() under stop_machine(): the target
 * address, the opcode to write (only the low 16 bits are stored by the
 * callback), and a counter used to rendezvous all online CPUs around
 * the write.
 */
struct csky_insn_patch {
	kprobe_opcode_t *addr;
	u32	opcode;
	atomic_t	cpu_count;
};
0027 
/*
 * stop_machine() callback executed on every online CPU: the last CPU to
 * arrive performs the instruction write, the others spin until it is
 * done, then every CPU invalidates its icache over the patched range.
 *
 * NOTE(review): only the low 16 bits of the u32 opcode are written here,
 * yet arch_prepare_ss_slot() passes full (possibly 32-bit) opcodes —
 * confirm how the upper halfword of a 32-bit insn reaches the slot.
 */
static int __kprobes patch_text_cb(void *priv)
{
	struct csky_insn_patch *param = priv;
	/* csky is 32-bit, so the pointer fits; uintptr_t would be cleaner */
	unsigned int addr = (unsigned int)param->addr;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		/* Last CPU in: perform the write and push it out of the dcache. */
		*(u16 *) addr = cpu_to_le16(param->opcode);
		dcache_wb_range(addr, addr + 2);
		/* Bump past num_online_cpus() to release the spinners below. */
		atomic_inc(&param->cpu_count);
	} else {
		/* Wait until the writer has finished and released us. */
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	/* Every CPU drops any stale copy of the patched insn from its icache. */
	icache_inv_range(addr, addr + 2);

	return 0;
}
0046 
/*
 * Atomically patch the instruction at @addr with @opcode by running
 * patch_text_cb() on all online CPUs via stop_machine.  Returns the
 * stop_machine_cpuslocked() result (0 on success).
 */
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };

	return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
}
0053 
/*
 * Prepare out-of-line single-stepping: record the address execution must
 * resume at afterwards (probe address + insn length: 4 for a 32-bit insn,
 * else 2) and copy the probed opcode into the insn slot.
 */
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;

	p->ainsn.api.restore = (unsigned long)p->addr + offset;

	patch_text(p->ainsn.api.insn, p->opcode);
}
0062 
/* Prepare an insn that will be simulated rather than single-stepped. */
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* restore == 0 tells post_kprobe_handler() not to rewrite regs->pc */
	p->ainsn.api.restore = 0;
}
0067 
/*
 * Emulate an instruction that cannot be stepped out of line, then finish
 * handling as if the single step had completed.
 */
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	post_kprobe_handler(kcb, regs);
}
0077 
/*
 * Validate and decode the instruction at the probe point, choosing
 * between out-of-line single-step (slot) and simulation.
 *
 * Returns 0 on success, -EILSEQ for a misaligned probe address, -EINVAL
 * for an instruction that can be neither stepped nor simulated, and
 * -ENOMEM when no insn slot is available.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	/* csky instructions are at least 2-byte aligned */
	if (probe_addr & 0x1)
		return -EILSEQ;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	/* decode instruction */
	switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
0112 
0113 /* install breakpoint in text */
/* install breakpoint in text: overwrite the probed insn with USR_BKPT */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, USR_BKPT);
}
0118 
0119 /* remove breakpoint from text */
/* remove breakpoint from text: write back the original opcode */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}
0124 
/* Release the out-of-line insn slot, if one was allocated for this probe. */
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}
0132 
/* Stash the active kprobe and its status before handling a reentrant hit. */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
0138 
/* Restore the kprobe and status saved by save_previous_kprobe(). */
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
0144 
/* Mark @p as the kprobe being handled on this CPU. */
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
0149 
0150 /*
0151  * Interrupts need to be disabled before single-step mode is set, and not
0152  * reenabled until after single-step mode ends.
0153  * Without disabling interrupt on local CPU, there is a chance of
0154  * interrupt occurrence in the period of exception return and  start of
0155  * out-of-line single-step, that result in wrongly single stepping
0156  * into the interrupt handler.
0157  */
/*
 * Save regs->sr and mask interrupts in the saved exception frame so that
 * no IRQ can fire between exception return and the out-of-line step.
 * NOTE(review): BIT(6) is presumably the interrupt-enable bit in the
 * csky PSR — confirm against the architecture manual.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_sr = regs->sr;
	regs->sr &= ~BIT(6);
}
0164 
/* Restore the full status register saved by kprobes_save_local_irqflag(). */
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->sr = kcb->saved_sr;
}
0170 
/*
 * Record a pending single step: the step is complete when the pc reaches
 * @addr plus the insn length (4 for a 32-bit insn, else 2).
 */
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
{
	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;

	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + offset;
}
0179 
/* Clear the pending single-step state set by set_ss_context(). */
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}
0185 
/*
 * regs->sr trace-mode field (bits 15:14): single-instruction step vs.
 * normal run.  Expansions are fully parenthesized so the macros compose
 * safely in any expression context.
 */
#define TRACE_MODE_SI		(BIT(14))
#define TRACE_MODE_MASK		(~(0x3 << 14))
#define TRACE_MODE_RUN		(0)
0189 
/*
 * Arrange for the probed instruction to be executed: either single-step
 * it out of line from its insn slot, or simulate it in software.
 *
 * @reenter: non-zero when this probe hit while another kprobe was already
 * being handled; the previous probe's state is saved first.
 */
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot, p);	/* mark pending ss */

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		/* Switch the trace-mode field to single-instruction mode. */
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
		/* Resume execution at the copied insn, not the breakpoint. */
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
0219 
/*
 * Handle a kprobe hit while another kprobe is already being processed
 * (e.g. a probe inside a pre/post handler).  Returns 1 when the reentry
 * was handled, 0 on an unexpected state.
 */
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		/* Legitimate reentry from a handler: nest one level deep. */
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/* A probe hit while single-stepping is unrecoverable. */
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
0243 
/*
 * Finish handling the current kprobe after the probed insn has executed
 * (or been simulated): restore the pc, run the post handler, and either
 * pop back to a nested probe or clear the current one.
 */
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.api.restore != 0)
		regs->pc = cur->ainsn.api.restore;

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)	{
		/* post_handler can hit breakpoint and single step
		 * again, so we enable D-flag for recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
0273 
0274 int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
0275 {
0276     struct kprobe *cur = kprobe_running();
0277     struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
0278 
0279     switch (kcb->kprobe_status) {
0280     case KPROBE_HIT_SS:
0281     case KPROBE_REENTER:
0282         /*
0283          * We are here because the instruction being single
0284          * stepped caused a page fault. We reset the current
0285          * kprobe and the ip points back to the probe address
0286          * and allow the page fault handler to continue as a
0287          * normal page fault.
0288          */
0289         regs->pc = (unsigned long) cur->addr;
0290         BUG_ON(!instruction_pointer(regs));
0291 
0292         if (kcb->kprobe_status == KPROBE_REENTER)
0293             restore_previous_kprobe(kcb);
0294         else
0295             reset_current_kprobe();
0296 
0297         break;
0298     case KPROBE_HIT_ACTIVE:
0299     case KPROBE_HIT_SSDONE:
0300         /*
0301          * In case the user-specified fault handler returned
0302          * zero, try to fix up.
0303          */
0304         if (fixup_exception(regs))
0305             return 1;
0306     }
0307     return 0;
0308 }
0309 
/*
 * Entry point from the breakpoint exception.  Returns 1 when the
 * breakpoint belonged to a kprobe and was handled, 0 otherwise.
 */
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			/* Hit while another kprobe is active: nested case. */
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path and no need to single
			 * stepping. Let's just reset current kprobe and exit.
			 *
			 * pre_handler can hit a breakpoint and can step thru
			 * before return.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return 1;
	}

	/*
	 * The breakpoint instruction was removed right
	 * after we hit it.  Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address.  In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
	return 0;
}
0359 
/*
 * Entry point from the single-step/trace exception.  If the pc matches
 * the address recorded by set_ss_context(), the out-of-line step has
 * finished: restore IRQ state and trace mode, then complete the probe.
 * Returns 1 when handled, 0 when the step was not ours.
 */
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
		clear_ss_context(kcb);	/* clear pending ss */

		kprobes_restore_local_irqflag(kcb, regs);
		/* Switch the trace-mode field back to normal execution. */
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;

		post_kprobe_handler(kcb, regs);
		return 1;
	}
	return 0;
}
0377 
0378 /*
0379  * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
0380  * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
0381  */
0382 int __init arch_populate_kprobe_blacklist(void)
0383 {
0384     int ret;
0385 
0386     ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
0387                     (unsigned long)__irqentry_text_end);
0388     return ret;
0389 }
0390 
/* Trampoline handler: resolve the real return address for a kretprobe. */
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, NULL);
}
0395 
/*
 * Hijack the function's return: save the original return address (lr)
 * in the kretprobe instance and redirect lr to the trampoline.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->lr;
	ri->fp = NULL;
	regs->lr = (unsigned long) &__kretprobe_trampoline;
}
0403 
/* csky has no arch-specific trampoline kprobes; always report "no". */
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
0408 
/* No arch-specific kprobes initialization is needed on csky. */
int __init arch_init_kprobes(void)
{
	return 0;
}