Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 #include <linux/err.h>
0003 #include <linux/slab.h>
0004 #include <linux/mm_types.h>
0005 #include <linux/sched/task.h>
0006 
0007 #include <asm/branch.h>
0008 #include <asm/cacheflush.h>
0009 #include <asm/fpu_emulator.h>
0010 #include <asm/inst.h>
0011 #include <asm/mipsregs.h>
0012 #include <linux/uaccess.h>
0013 
0014 /**
0015  * struct emuframe - The 'emulation' frame structure
0016  * @emul:   The instruction to 'emulate'.
0017  * @badinst:    A break instruction to cause a return to the kernel.
0018  *
0019  * This structure defines the frames placed within the delay slot emulation
0020  * page in response to a call to mips_dsemul(). Each thread may be allocated
0021  * only one frame at any given time. The kernel stores within it the
0022  * instruction to be 'emulated' followed by a break instruction, then
0023  * executes the frame in user mode. The break causes a trap to the kernel
0024  * which leads to do_dsemulret() being called unless the instruction in
0025  * @emul causes a trap itself, is a branch, or a signal is delivered to
0026  * the thread. In these cases the allocated frame will either be reused by
0027  * a subsequent delay slot 'emulation', or be freed during signal delivery or
0028  * upon thread exit.
0029  *
0030  * This approach is used because:
0031  *
0032  * - Actually emulating all instructions isn't feasible. We would need to
0033  *   be able to handle instructions from all revisions of the MIPS ISA,
0034  *   all ASEs & all vendor instruction set extensions. This would be a
0035  *   whole lot of work & continual maintenance burden as new instructions
0036  *   are introduced, and in the case of some vendor extensions may not
0037  *   even be possible. Thus we need to take the approach of actually
0038  *   executing the instruction.
0039  *
0040  * - We must execute the instruction within user context. If we were to
0041  *   execute the instruction in kernel mode then it would have access to
0042  *   kernel resources without very careful checks, leaving us with a
0043  *   high potential for security or stability issues to arise.
0044  *
0045  * - We used to place the frame on the user's stack, but this requires
0046  *   that the stack be executable. This is bad for security so the
0047  *   per-process page is now used instead.
0048  *
0049  * - The instruction in @emul may be something entirely invalid for a
0050  *   delay slot. The user may (intentionally or otherwise) place a branch
0051  *   in a delay slot, or a kernel mode instruction, or something else
0052  *   which generates an exception. Thus we can't rely upon the break in
0053  *   @badinst always being hit. For this reason we track the index of the
0054  *   frame allocated to each thread, allowing us to clean it up at later
0055  *   points such as signal delivery or thread exit.
0056  *
0057  * - The user may generate a fake struct emuframe if they wish, invoking
0058  *   the BRK_MEMU break instruction themselves. We must therefore not
0059  *   trust that BRK_MEMU means there's actually a valid frame allocated
0060  *   to the thread, and must not allow the user to do anything they
0061  *   couldn't already.
0062  */
struct emuframe {
    mips_instruction    emul;       /* the delay slot instruction to run */
    mips_instruction    badinst;    /* break insn trapping back to the kernel */
};

/* Number of emuframe slots that fit within the one-page emulation area */
static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
0069 
0070 static inline __user struct emuframe *dsemul_page(void)
0071 {
0072     return (__user struct emuframe *)STACK_TOP;
0073 }
0074 
/*
 * alloc_emuframe() - Allocate a frame from the current mm's delay slot
 * emulation page.
 *
 * The page's allocation bitmap is created lazily on first use; the
 * allocation happens under bd_emupage_lock, hence GFP_ATOMIC. If every
 * frame is currently in use, we drop the lock and sleep killably until
 * another thread frees one, then retry from scratch.
 *
 * Return: the index of the allocated frame, or BD_EMUFRAME_NONE if the
 * bitmap could not be allocated or a fatal signal arrived while waiting.
 */
static int alloc_emuframe(void)
{
    mm_context_t *mm_ctx = &current->mm->context;
    int idx;

retry:
    spin_lock(&mm_ctx->bd_emupage_lock);

    /* Ensure we have an allocation bitmap */
    if (!mm_ctx->bd_emupage_allocmap) {
        mm_ctx->bd_emupage_allocmap = bitmap_zalloc(emupage_frame_count,
                                GFP_ATOMIC);
        if (!mm_ctx->bd_emupage_allocmap) {
            idx = BD_EMUFRAME_NONE;
            goto out_unlock;
        }
    }

    /* Attempt to allocate a single bit/frame */
    idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
                      emupage_frame_count, 0);
    if (idx < 0) {
        /*
         * Failed to allocate a frame. We'll wait until one becomes
         * available. We unlock the page so that other threads actually
         * get the opportunity to free their frames, which means
         * technically the result of bitmap_full may be incorrect.
         * However the worst case is that we repeat all this and end up
         * back here again.
         */
        spin_unlock(&mm_ctx->bd_emupage_lock);
        if (!wait_event_killable(mm_ctx->bd_emupage_queue,
            !bitmap_full(mm_ctx->bd_emupage_allocmap,
                     emupage_frame_count)))
            goto retry;

        /* Received a fatal signal - just give in */
        return BD_EMUFRAME_NONE;
    }

    /* Success! */
    pr_debug("allocate emuframe %d to %d\n", idx, current->pid);
out_unlock:
    spin_unlock(&mm_ctx->bd_emupage_lock);
    return idx;
}
0121 
0122 static void free_emuframe(int idx, struct mm_struct *mm)
0123 {
0124     mm_context_t *mm_ctx = &mm->context;
0125 
0126     spin_lock(&mm_ctx->bd_emupage_lock);
0127 
0128     pr_debug("free emuframe %d from %d\n", idx, current->pid);
0129     bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);
0130 
0131     /* If some thread is waiting for a frame, now's its chance */
0132     wake_up(&mm_ctx->bd_emupage_queue);
0133 
0134     spin_unlock(&mm_ctx->bd_emupage_lock);
0135 }
0136 
0137 static bool within_emuframe(struct pt_regs *regs)
0138 {
0139     unsigned long base = (unsigned long)dsemul_page();
0140 
0141     if (regs->cp0_epc < base)
0142         return false;
0143     if (regs->cp0_epc >= (base + PAGE_SIZE))
0144         return false;
0145 
0146     return true;
0147 }
0148 
/*
 * dsemul_thread_cleanup() - Release any emulation frame owned by @tsk.
 * @tsk: the task whose frame, if any, should be freed.
 *
 * The frame index is detached from the thread atomically first, so the
 * frame can't be freed twice. task_lock() guards against tsk->mm
 * changing underneath us; mm may already be NULL on the exit path, in
 * which case the whole emulation page is gone and there is nothing to
 * free.
 *
 * Return: true if a frame was freed, false if none was allocated.
 */
bool dsemul_thread_cleanup(struct task_struct *tsk)
{
    int fr_idx;

    /* Clear any allocated frame, retrieving its index */
    fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);

    /* If no frame was allocated, we're done */
    if (fr_idx == BD_EMUFRAME_NONE)
        return false;

    task_lock(tsk);

    /* Free the frame that this thread had allocated */
    if (tsk->mm)
        free_emuframe(fr_idx, tsk->mm);

    task_unlock(tsk);
    return true;
}
0169 
/*
 * dsemul_thread_rollback() - Rewind a thread that is currently executing
 * inside its emulation frame (e.g. ahead of signal delivery).
 * @regs: the thread's user register context.
 *
 * Does nothing unless @regs is executing within the emulation page and
 * the thread actually owns a frame. Otherwise EPC is moved either back
 * to the original branch (instruction not yet executed) or forward to
 * the continuation PC (instruction already executed), and the frame is
 * released.
 *
 * Return: true if a frame was freed, false otherwise.
 */
bool dsemul_thread_rollback(struct pt_regs *regs)
{
    struct emuframe __user *fr;
    int fr_idx;

    /* Do nothing if we're not executing from a frame */
    if (!within_emuframe(regs))
        return false;

    /* Find the frame being executed */
    fr_idx = atomic_read(&current->thread.bd_emu_frame);
    if (fr_idx == BD_EMUFRAME_NONE)
        return false;
    fr = &dsemul_page()[fr_idx];

    /*
     * If the PC is at the emul instruction, roll back to the branch. If
     * PC is at the badinst (break) instruction, we've already emulated the
     * instruction so progress to the continue PC. If it's anything else
     * then something is amiss & the user has branched into some other area
     * of the emupage - we'll free the allocated frame anyway.
     */
    if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
        regs->cp0_epc = current->thread.bd_emu_branch_pc;
    else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
        regs->cp0_epc = current->thread.bd_emu_cont_pc;

    atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
    free_emuframe(fr_idx, current->mm);
    return true;
}
0201 
0202 void dsemul_mm_cleanup(struct mm_struct *mm)
0203 {
0204     mm_context_t *mm_ctx = &mm->context;
0205 
0206     bitmap_free(mm_ctx->bd_emupage_allocmap);
0207 }
0208 
/*
 * mips_dsemul() - 'Emulate' the instruction @ir found in a branch delay
 * slot by arranging for it to execute from the emulation page.
 * @regs:      user register context at the time of the branch.
 * @ir:        the delay slot instruction.
 * @branch_pc: PC of the branch whose delay slot @ir occupies.
 * @cont_pc:   PC at which execution should continue afterwards.
 *
 * NOPs and microMIPS ADDIUPC are handled inline with no frame. For
 * everything else a frame is (re)used: @ir plus a trapping break are
 * written into it via access_process_vm() and EPC is pointed at the
 * frame so it runs in user mode.
 *
 * Return: 0 if a frame was set up for execution, SIGBUS on failure, or
 * -1 when the instruction was handled here directly and no frame is
 * needed (caller is presumably expected to continue at @cont_pc itself
 * - confirm against callers).
 */
int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
        unsigned long branch_pc, unsigned long cont_pc)
{
    int isa16 = get_isa16_mode(regs->cp0_epc);
    mips_instruction break_math;
    unsigned long fr_uaddr;
    struct emuframe fr;
    int fr_idx, ret;

    /* NOP is easy */
    if (ir == 0)
        return -1;

    /* microMIPS instructions */
    if (isa16) {
        union mips_instruction insn = { .word = ir };

        /* NOP16 aka MOVE16 $0, $0 */
        if ((ir >> 16) == MM_NOP16)
            return -1;

        /* ADDIUPC - PC-relative add, performed here rather than in a frame */
        if (insn.mm_a_format.opcode == mm_addiupc_op) {
            unsigned int rs;
            s32 v;

            /* Decode the 3-bit rs field into the actual GPR number */
            rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
            v = regs->cp0_epc & ~3;
            v += insn.mm_a_format.simmediate << 2;
            regs->regs[rs] = (long)v;
            return -1;
        }
    }

    pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);

    /* Allocate a frame if we don't already have one */
    fr_idx = atomic_read(&current->thread.bd_emu_frame);
    if (fr_idx == BD_EMUFRAME_NONE)
        fr_idx = alloc_emuframe();
    if (fr_idx == BD_EMUFRAME_NONE)
        return SIGBUS;

    /* Retrieve the appropriately encoded break instruction */
    break_math = BREAK_MATH(isa16);

    /* Write the instructions to the frame */
    if (isa16) {
        /* microMIPS stores 32-bit instructions as two halfwords */
        union mips_instruction _emul = {
            .halfword = { ir >> 16, ir }
        };
        union mips_instruction _badinst = {
            .halfword = { break_math >> 16, break_math }
        };

        fr.emul = _emul.word;
        fr.badinst = _badinst.word;
    } else {
        fr.emul = ir;
        fr.badinst = break_math;
    }

    /* Write the frame to user memory */
    fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
    ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
                FOLL_FORCE | FOLL_WRITE);
    if (unlikely(ret != sizeof(fr))) {
        MIPS_FPU_EMU_INC_STATS(errors);
        free_emuframe(fr_idx, current->mm);
        return SIGBUS;
    }

    /* Record the PC of the branch, PC to continue from & frame index */
    current->thread.bd_emu_branch_pc = branch_pc;
    current->thread.bd_emu_cont_pc = cont_pc;
    atomic_set(&current->thread.bd_emu_frame, fr_idx);

    /* Change user register context to execute the frame */
    /* isa16 is EPC bit 0, so OR-ing it back preserves the ISA mode */
    regs->cp0_epc = fr_uaddr | isa16;

    return 0;
}
0291 
0292 bool do_dsemulret(struct pt_regs *xcp)
0293 {
0294     /* Cleanup the allocated frame, returning if there wasn't one */
0295     if (!dsemul_thread_cleanup(current)) {
0296         MIPS_FPU_EMU_INC_STATS(errors);
0297         return false;
0298     }
0299 
0300     /* Set EPC to return to post-branch instruction */
0301     xcp->cp0_epc = current->thread.bd_emu_cont_pc;
0302     pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
0303     MIPS_FPU_EMU_INC_STATS(ds_emul);
0304     return true;
0305 }