// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/kdebug.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/uprobes.h>

#include <asm/branch.h>
#include <asm/cpu-features.h>
#include <asm/ptrace.h>

#include "probes-common.h"

static inline int insn_has_delay_slot(const union mips_instruction insn)
{
    return __insn_has_delay_slot(insn);
}

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @aup: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint.
 * Return 0 on success or a negative errno on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
    struct mm_struct *mm, unsigned long addr)
{
    union mips_instruction inst;

    /*
     * For the time being this also blocks attempts to use uprobes with
     * MIPS16 and microMIPS.
     */
    if (addr & 0x03)
        return -EINVAL;

    inst.word = aup->insn[0];

    if (__insn_is_compact_branch(inst)) {
        pr_notice("Uprobes for compact branches are not supported\n");
        return -EINVAL;
    }

    aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
    aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;       /* break back into the kernel */

    return 0;
}

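/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the slot prepared above always ends in an XOL break so the kernel
 * regains control once the copied instruction has been stepped.  For an
 * ordinary instruction the slot is
 *
 *	ixol[0] = insn[0];                  // the probed instruction itself
 *	ixol[1] = UPROBE_BRK_UPROBE_XOL;    // break back into the kernel
 *
 * while for a branch (insn_has_delay_slot() returns 1) only the
 * delay-slot instruction insn[1] is stepped out of line; the branch
 * itself is emulated later in arch_uprobe_pre_xol().
 */
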
/**
 * is_trap_insn - check if the instruction is a trap variant
 * @insn: instruction to be checked.
 * Returns true if @insn is a trap variant.
 *
 * This definition overrides the weak definition in kernel/events/uprobes.c
 * and is needed for the case where an architecture has multiple trap
 * instructions (like PowerPC or MIPS).  We treat BREAK just like the more
 * modern conditional trap instructions.
 */
bool is_trap_insn(uprobe_opcode_t *insn)
{
    union mips_instruction inst;

    inst.word = *insn;

    switch (inst.i_format.opcode) {
    case spec_op:
        switch (inst.r_format.func) {
        case break_op:
        case teq_op:
        case tge_op:
        case tgeu_op:
        case tlt_op:
        case tltu_op:
        case tne_op:
            return true;
        }
        break;

    case bcond_op:  /* Yes, really ...  */
        switch (inst.u_format.rt) {
        case teqi_op:
        case tgei_op:
        case tgeiu_op:
        case tlti_op:
        case tltiu_op:
        case tnei_op:
            return true;
        }
        break;
    }

    return false;
}

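/*
 * For contrast, a rough sketch of the weak generic definition this
 * function overrides (paraphrased from kernel/events/uprobes.c; the
 * exact body may differ between kernel versions).  It only recognises
 * the uprobe software breakpoint itself, which is not enough on MIPS
 * where BREAK and the conditional trap encodings above must also be
 * treated as traps:
 *
 *	bool __weak is_trap_insn(uprobe_opcode_t *insn)
 *	{
 *		return is_swbp_insn(insn);	// *insn == UPROBE_SWBP_INSN
 *	}
 */
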
#define UPROBE_TRAP_NR  ULONG_MAX

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @aup: the probepoint information.
 * @regs: reflects the saved user state of the current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
    struct uprobe_task *utask = current->utask;

    /*
     * Now find the EPC at which to resume after the breakpoint has been
     * dealt with.  This may require emulation of a branch.
     */
    aup->resume_epc = regs->cp0_epc + 4;
    if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
        __compute_return_epc_for_insn(regs,
            (union mips_instruction) aup->insn[0]);
        aup->resume_epc = regs->cp0_epc;
    }
    utask->autask.saved_trap_nr = current->thread.trap_nr;
    current->thread.trap_nr = UPROBE_TRAP_NR;
    regs->cp0_epc = current->utask->xol_vaddr;

    return 0;
}

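/*
 * Worked example (editor's sketch; the addresses are hypothetical):
 * assume a probe on a taken "beq" at 0x400100 whose target is 0x400200.
 *
 *   - the uprobe break at 0x400100 traps with cp0_epc == 0x400100;
 *   - the instruction has a delay slot, so __compute_return_epc_for_insn()
 *     emulates the branch and leaves cp0_epc at the target, 0x400200,
 *     which is saved in resume_epc;
 *   - cp0_epc is then redirected to the XOL slot, which single-steps the
 *     delay-slot instruction and hits UPROBE_BRK_UPROBE_XOL;
 *   - arch_uprobe_post_xol() finally restores cp0_epc = resume_epc.
 *
 * For a non-branch instruction resume_epc is simply the original
 * cp0_epc + 4.
 */
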
int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
    struct uprobe_task *utask = current->utask;

    current->thread.trap_nr = utask->autask.saved_trap_nr;
    regs->cp0_epc = aup->resume_epc;

    return 0;
}

/*
 * If the XOL instruction itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc.), detect the case where a single-stepped
 * instruction jumps back to its own address.  It is assumed that anything
 * like do_page_fault/do_trap/etc. sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol()/arch_uprobe_post_xol() save/restore thread.trap_nr,
 * and arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not
 * equal to UPROBE_TRAP_NR (== -1) set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
    if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
        return true;

    return false;
}

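/*
 * Lifecycle sketch (editor's addition): arch_uprobe_pre_xol() sets
 * thread.trap_nr = UPROBE_TRAP_NR before redirecting execution to the
 * XOL slot.  If the stepped copy faults, the fault/trap handlers
 * overwrite trap_nr with the real trap number, the check above returns
 * true, and the uprobes core then aborts the step via
 * arch_uprobe_abort_xol() rather than completing it with
 * arch_uprobe_post_xol().
 */
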
int arch_uprobe_exception_notify(struct notifier_block *self,
    unsigned long val, void *data)
{
    struct die_args *args = data;
    struct pt_regs *regs = args->regs;

    /* regs == NULL is a kernel bug */
    if (WARN_ON(!regs))
        return NOTIFY_DONE;

    /* We are only interested in userspace traps */
    if (!user_mode(regs))
        return NOTIFY_DONE;

    switch (val) {
    case DIE_UPROBE:
        if (uprobe_pre_sstep_notifier(regs))
            return NOTIFY_STOP;
        break;
    case DIE_UPROBE_XOL:
        if (uprobe_post_sstep_notifier(regs))
            return NOTIFY_STOP;
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}

/*
 * This function gets called when the XOL instruction either traps or the
 * thread has a fatal signal.  Reset the instruction pointer to the probed
 * address for a potential restart or for post-mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *aup,
    struct pt_regs *regs)
{
    struct uprobe_task *utask = current->utask;

    instruction_pointer_set(regs, utask->vaddr);
}

unsigned long arch_uretprobe_hijack_return_addr(
    unsigned long trampoline_vaddr, struct pt_regs *regs)
{
    unsigned long ra;

    ra = regs->regs[31];

    /* Replace the return address with the trampoline address */
    regs->regs[31] = trampoline_vaddr;

    return ra;
}

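/*
 * Example (editor's sketch; the addresses are hypothetical): if the
 * probed function was entered with ra == 0x400abc and the per-mm return
 * trampoline lives at trampoline_vaddr == 0x77ff0000, then after this
 * call
 *
 *	regs->regs[31] == 0x77ff0000	// function now "returns" to the trampoline
 *	return value   == 0x400abc	// kept by the core and restored later
 *
 * so the uprobes core regains control when the function returns, runs
 * the uretprobe handler and then resumes at the original address.
 */
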
/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 *
 * This version overrides the weak version in kernel/events/uprobes.c.
 * It is required to handle MIPS16 and microMIPS.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
    unsigned long vaddr)
{
    return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
              void *src, unsigned long len)
{
    unsigned long kaddr, kstart;

    /* Initialize the slot */
    kaddr = (unsigned long)kmap_atomic(page);
    kstart = kaddr + (vaddr & ~PAGE_MASK);
    memcpy((void *)kstart, src, len);
    flush_icache_range(kstart, kstart + len);
    kunmap_atomic((void *)kaddr);
}

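/*
 * For comparison, the weak generic helper (paraphrased from
 * kernel/events/uprobes.c; details vary between kernel versions) only
 * performs the data-side copy, roughly
 *
 *	copy_to_page(page, vaddr, src, len);	// kmap_atomic() + memcpy()
 *
 * and does not touch the instruction cache.  That is not sufficient on
 * MIPS, where the I-cache is not coherent with the D-cache -- hence the
 * explicit flush_icache_range() in the override above.
 */
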
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 *
 * This overrides the weak version in kernel/events/uprobes.c.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
    return instruction_pointer(regs);
}

/*
 * See if the instruction can be emulated.
 * Returns true if the instruction was emulated, false otherwise.
 *
 * For now we never emulate here, so this function just returns false and
 * every probed instruction is single-stepped out of line.
 */
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
    return false;
}