0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  PowerPC version 
0004  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
0005  *
0006  *  Derived from "arch/i386/kernel/signal.c"
0007  *    Copyright (C) 1991, 1992 Linus Torvalds
0008  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
0009  */
0010 
0011 #include <linux/sched.h>
0012 #include <linux/mm.h>
0013 #include <linux/smp.h>
0014 #include <linux/kernel.h>
0015 #include <linux/signal.h>
0016 #include <linux/errno.h>
0017 #include <linux/wait.h>
0018 #include <linux/unistd.h>
0019 #include <linux/stddef.h>
0020 #include <linux/elf.h>
0021 #include <linux/ptrace.h>
0022 #include <linux/ratelimit.h>
0023 #include <linux/syscalls.h>
0024 #include <linux/pagemap.h>
0025 
0026 #include <asm/sigcontext.h>
0027 #include <asm/ucontext.h>
0028 #include <linux/uaccess.h>
0029 #include <asm/unistd.h>
0030 #include <asm/cacheflush.h>
0031 #include <asm/syscalls.h>
0032 #include <asm/vdso.h>
0033 #include <asm/switch_to.h>
0034 #include <asm/tm.h>
0035 #include <asm/asm-prototypes.h>
0036 
0037 #include "signal.h"
0038 
0039 
0040 #define GP_REGS_SIZE    min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
0041 #define FP_REGS_SIZE    sizeof(elf_fpregset_t)
0042 
0043 #define TRAMP_TRACEBACK 4
0044 #define TRAMP_SIZE  7
0045 
0046 /*
0047  * When we have signals to deliver, we set up on the user stack,
0048  * going down from the original stack pointer:
0049  *  1) an rt_sigframe struct which contains the ucontext
0050  *  2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
0051  *     frame for the signal handler.
0052  */
0053 
0054 struct rt_sigframe {
0055     /* sys_rt_sigreturn requires the ucontext be the first field */
0056     struct ucontext uc;
0057 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0058     struct ucontext uc_transact;
0059 #endif
0060     unsigned long _unused[2];
0061     unsigned int tramp[TRAMP_SIZE];
0062     struct siginfo __user *pinfo;
0063     void __user *puc;
0064     struct siginfo info;
0065     /* The 64-bit little-endian ABI allows a redzone of 512 bytes below sp */
0066     char abigap[USER_REDZONE_SIZE];
0067 } __attribute__ ((aligned (16)));
0068 
0069 unsigned long get_min_sigframe_size_64(void)
0070 {
0071     return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE;
0072 }
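
/*
 * Illustrative userspace sketch (not part of this file): this minimum
 * frame size is what newer kernels can report through the
 * AT_MINSIGSTKSZ auxv entry, so an alternate signal stack is best
 * sized from getauxval() rather than the legacy MINSIGSTKSZ constant
 * alone. Assumes a glibc recent enough to define AT_MINSIGSTKSZ.
 */
#include <signal.h>
#include <stdlib.h>
#include <sys/auxv.h>

static int setup_altstack(void)
{
	size_t min = getauxval(AT_MINSIGSTKSZ);	/* 0 if the kernel lacks it */
	stack_t ss = { 0 };

	ss.ss_size = (min ? min : MINSIGSTKSZ) + SIGSTKSZ;
	ss.ss_sp = malloc(ss.ss_size);
	if (!ss.ss_sp)
		return -1;
	return sigaltstack(&ss, NULL);
}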
0073 
0074 /*
0075  * This computes a quad word aligned pointer inside the vmx_reserve array
0076  * element. For historical reasons sigcontext might not be quad word aligned,
0077  * but the location we write the VMX regs to must be. See the comment in
0078  * sigcontext for more detail.
0079  */
0080 #ifdef CONFIG_ALTIVEC
0081 static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
0082 {
0083     return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
0084 }
0085 #endif
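
/*
 * Aside: the (addr + 15) & ~0xf expression above is the usual
 * power-of-two align-up idiom. A minimal generic sketch (align_up is
 * an illustrative helper, not kernel API):
 */
#include <stdint.h>

static inline uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
	/* align must be a power of two for the mask trick to hold */
	return (addr + align - 1) & ~(align - 1);
}
/* e.g. align_up(0x1001, 16) == 0x1010; aligned inputs are unchanged */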
0086 
0087 static void prepare_setup_sigcontext(struct task_struct *tsk)
0088 {
0089 #ifdef CONFIG_ALTIVEC
0090     /* save altivec registers */
0091     if (tsk->thread.used_vr)
0092         flush_altivec_to_thread(tsk);
0093     if (cpu_has_feature(CPU_FTR_ALTIVEC))
0094         tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
0095 #endif /* CONFIG_ALTIVEC */
0096 
0097     flush_fp_to_thread(tsk);
0098 
0099 #ifdef CONFIG_VSX
0100     if (tsk->thread.used_vsr)
0101         flush_vsx_to_thread(tsk);
0102 #endif /* CONFIG_VSX */
0103 }
0104 
0105 /*
0106  * Set up the sigcontext for the signal frame.
0107  */
0108 
0109 #define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label)\
0110 do {                                            \
0111     if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region))\
0112         goto label;                             \
0113 } while (0)
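
/*
 * Aside: the do { ... } while (0) wrapper above is what lets a
 * goto-on-failure macro behave as a single statement, e.g. inside an
 * unbraced if/else. A hedged, self-contained illustration (TRY and
 * demo() are hypothetical names, not kernel API):
 */
#define TRY(expr, label)			\
do {						\
	if (expr)				\
		goto label;			\
} while (0)

static int demo(int fail)
{
	if (fail)
		TRY(fail, out);	/* expands safely even without braces */
	else
		TRY(0, out);
	return 0;
out:
	return -1;
}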
0114 static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
0115                     struct task_struct *tsk, int signr, sigset_t *set,
0116                     unsigned long handler, int ctx_has_vsx_region)
0117 {
0118     /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
0119      * process never used altivec yet (MSR_VEC is zero in pt_regs of
0120      * the context). This is very important because we must ensure we
0121      * don't lose the VRSAVE content that may have been set prior to
0122      * the process doing its first vector operation.
0123      * Userland shall check AT_HWCAP to know whether it can rely on the
0124      * v_regs pointer or not.
0125      */
0126 #ifdef CONFIG_ALTIVEC
0127     elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
0128 #endif
0129     struct pt_regs *regs = tsk->thread.regs;
0130     unsigned long msr = regs->msr;
0131     /* Force userspace to always see softe as 1 (interrupts enabled) */
0132     unsigned long softe = 0x1;
0133 
0134     BUG_ON(tsk != current);
0135 
0136 #ifdef CONFIG_ALTIVEC
0137     unsafe_put_user(v_regs, &sc->v_regs, efault_out);
0138 
0139     /* save altivec registers */
0140     if (tsk->thread.used_vr) {
0141         /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
0142         unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
0143                     33 * sizeof(vector128), efault_out);
0144         /* set MSR_VEC in the MSR value in the frame to indicate that
0145          * sc->v_regs contains valid data.
0146          */
0147         msr |= MSR_VEC;
0148     }
0149     /* We always copy to/from vrsave, it's 0 if we don't have or don't
0150      * use altivec.
0151      */
0152     unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
0153 #else /* CONFIG_ALTIVEC */
0154     unsafe_put_user(0, &sc->v_regs, efault_out);
0155 #endif /* CONFIG_ALTIVEC */
0156     /* copy fpr regs and fpscr */
0157     unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out);
0158 
0159     /*
0160      * Clear the MSR VSX bit to indicate there is no valid state attached
0161      * to this context, except in the specific case below where we set it.
0162      */
0163     msr &= ~MSR_VSX;
0164 #ifdef CONFIG_VSX
0165     /*
0166      * Copy VSX low doubleword to local buffer for formatting,
0167      * then out to userspace.  Update v_regs to point after the
0168      * VMX data.
0169      */
0170     if (tsk->thread.used_vsr && ctx_has_vsx_region) {
0171         v_regs += ELF_NVRREG;
0172         unsafe_copy_vsx_to_user(v_regs, tsk, efault_out);
0173         /* set MSR_VSX in the MSR value in the frame to
0174          * indicate that the VSX region contains valid data.
0175          */
0176         msr |= MSR_VSX;
0177     }
0178 #endif /* CONFIG_VSX */
0179     unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out);
0180     unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out);
0181     unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
0182     unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out);
0183     unsafe_put_user(signr, &sc->signal, efault_out);
0184     unsafe_put_user(handler, &sc->handler, efault_out);
0185     if (set != NULL)
0186         unsafe_put_user(set->sig[0], &sc->oldmask, efault_out);
0187 
0188     return 0;
0189 
0190 efault_out:
0191     return -EFAULT;
0192 }
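
/*
 * Aside: as the comment above says, userspace must consult AT_HWCAP
 * before trusting the v_regs pointer. A hedged powerpc64 sketch,
 * assuming glibc's powerpc64 <sys/ucontext.h> layout and the usual
 * PPC_FEATURE_HAS_ALTIVEC hwcap bit exposed via <sys/auxv.h>:
 */
#include <signal.h>
#include <ucontext.h>
#include <sys/auxv.h>

static void vec_aware_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;

	if ((getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC) &&
	    uc->uc_mcontext.v_regs) {
		/* the 33 vector registers saved above start at v_regs */
	}
}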
0193 
0194 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0195 /*
0196  * As above, but Transactional Memory is in use, so deliver sigcontexts
0197  * containing checkpointed and transactional register states.
0198  *
0199  * To do this, we treclaim (done before entering here) to gather both sets of
0200  * registers and set up the 'normal' sigcontext registers with rolled-back
0201  * register values such that a simple signal handler sees a correct
0202  * checkpointed register state.  If interested, a TM-aware sighandler can
0203  * examine the transactional registers in the 2nd sigcontext to determine the
0204  * real origin of the signal.
0205  */
0206 static long setup_tm_sigcontexts(struct sigcontext __user *sc,
0207                  struct sigcontext __user *tm_sc,
0208                  struct task_struct *tsk,
0209                  int signr, sigset_t *set, unsigned long handler,
0210                  unsigned long msr)
0211 {
0212     /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
0213      * process never used altivec yet (MSR_VEC is zero in pt_regs of
0214      * the context). This is very important because we must ensure we
0215      * don't lose the VRSAVE content that may have been set prior to
0216      * the process doing its first vector operation.
0217      * Userland shall check AT_HWCAP to know whether it can rely on the
0218      * v_regs pointer or not.
0219      */
0220 #ifdef CONFIG_ALTIVEC
0221     elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
0222     elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
0223 #endif
0224     struct pt_regs *regs = tsk->thread.regs;
0225     long err = 0;
0226 
0227     BUG_ON(tsk != current);
0228 
0229     BUG_ON(!MSR_TM_ACTIVE(msr));
0230 
0231     WARN_ON(tm_suspend_disabled);
0232 
0233     /* Restore checkpointed FP, VEC, and VSX bits from ckpt_regs as
0234      * it contains the correct FP, VEC, VSX state after we treclaimed
0235      * the transaction and giveup_all() was called on reclaiming.
0236      */
0237     msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
0238 
0239 #ifdef CONFIG_ALTIVEC
0240     err |= __put_user(v_regs, &sc->v_regs);
0241     err |= __put_user(tm_v_regs, &tm_sc->v_regs);
0242 
0243     /* save altivec registers */
0244     if (tsk->thread.used_vr) {
0245         /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
0246         err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
0247                       33 * sizeof(vector128));
0248         /* If VEC was enabled there are transactional VRs valid too,
0249          * else they're a copy of the checkpointed VRs.
0250          */
0251         if (msr & MSR_VEC)
0252             err |= __copy_to_user(tm_v_regs,
0253                           &tsk->thread.vr_state,
0254                           33 * sizeof(vector128));
0255         else
0256             err |= __copy_to_user(tm_v_regs,
0257                           &tsk->thread.ckvr_state,
0258                           33 * sizeof(vector128));
0259 
0260         /* set MSR_VEC in the MSR value in the frame to indicate
0261          * that sc->v_regs contains valid data.
0262          */
0263         msr |= MSR_VEC;
0264     }
0265     /* We always copy to/from vrsave, it's 0 if we don't have or don't
0266      * use altivec.
0267      */
0268     if (cpu_has_feature(CPU_FTR_ALTIVEC))
0269         tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
0270     err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
0271     if (msr & MSR_VEC)
0272         err |= __put_user(tsk->thread.vrsave,
0273                   (u32 __user *)&tm_v_regs[33]);
0274     else
0275         err |= __put_user(tsk->thread.ckvrsave,
0276                   (u32 __user *)&tm_v_regs[33]);
0277 
0278 #else /* CONFIG_ALTIVEC */
0279     err |= __put_user(0, &sc->v_regs);
0280     err |= __put_user(0, &tm_sc->v_regs);
0281 #endif /* CONFIG_ALTIVEC */
0282 
0283     /* copy fpr regs and fpscr */
0284     err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
0285     if (msr & MSR_FP)
0286         err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
0287     else
0288         err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);
0289 
0290 #ifdef CONFIG_VSX
0291     /*
0292      * Copy VSX low doubleword to local buffer for formatting,
0293      * then out to userspace.  Update v_regs to point after the
0294      * VMX data.
0295      */
0296     if (tsk->thread.used_vsr) {
0297         v_regs += ELF_NVRREG;
0298         tm_v_regs += ELF_NVRREG;
0299 
0300         err |= copy_ckvsx_to_user(v_regs, tsk);
0301 
0302         if (msr & MSR_VSX)
0303             err |= copy_vsx_to_user(tm_v_regs, tsk);
0304         else
0305             err |= copy_ckvsx_to_user(tm_v_regs, tsk);
0306 
0307         /* set MSR_VSX in the MSR value in the frame to
0308          * indicate that the VSX region contains valid data.
0309          */
0310         msr |= MSR_VSX;
0311     }
0312 #endif /* CONFIG_VSX */
0313 
0314     err |= __put_user(&sc->gp_regs, &sc->regs);
0315     err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
0316     err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
0317     err |= __copy_to_user(&sc->gp_regs,
0318                   &tsk->thread.ckpt_regs, GP_REGS_SIZE);
0319     err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
0320     err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
0321     err |= __put_user(signr, &sc->signal);
0322     err |= __put_user(handler, &sc->handler);
0323     if (set != NULL)
0324         err |=  __put_user(set->sig[0], &sc->oldmask);
0325 
0326     return err;
0327 }
0328 #endif
0329 
0330 /*
0331  * Restore the sigcontext from the signal frame.
0332  */
0333 #define unsafe_restore_sigcontext(tsk, set, sig, sc, label) do {    \
0334     if (__unsafe_restore_sigcontext(tsk, set, sig, sc))     \
0335         goto label;                     \
0336 } while (0)
0337 static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set,
0338                         int sig, struct sigcontext __user *sc)
0339 {
0340 #ifdef CONFIG_ALTIVEC
0341     elf_vrreg_t __user *v_regs;
0342 #endif
0343     unsigned long save_r13 = 0;
0344     unsigned long msr;
0345     struct pt_regs *regs = tsk->thread.regs;
0346 #ifdef CONFIG_VSX
0347     int i;
0348 #endif
0349 
0350     BUG_ON(tsk != current);
0351 
0352     /* If this is not a signal return, we preserve the TLS in r13 */
0353     if (!sig)
0354         save_r13 = regs->gpr[13];
0355 
0356     /* copy the GPRs */
0357     unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), efault_out);
0358     unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out);
0359     /* get MSR separately, transfer the LE bit if doing signal return */
0360     unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
0361     if (sig)
0362         regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
0363     unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
0364     unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
0365     unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
0366     unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out);
0367     unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out);
0368     /* Don't allow userspace to set SOFTE */
0369     set_trap_norestart(regs);
0370     unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out);
0371     unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out);
0372     unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out);
0373 
0374     if (!sig)
0375         regs->gpr[13] = save_r13;
0376     if (set != NULL)
0377         unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);
0378 
0379     /*
0380      * Force reload of FP/VEC/VSX so userspace sees any changes.
0381      * Clear these bits from the user process' MSR before copying into the
0382      * thread struct. If we are rescheduled or preempted and another task
0383      * uses FP/VEC/VSX, and this process has the MSR bits set, then the
0384      * context switch code will save the current CPU state into the
0385      * thread_struct - possibly overwriting the data we are updating here.
0386      */
0387     regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
0388 
0389 #ifdef CONFIG_ALTIVEC
0390     unsafe_get_user(v_regs, &sc->v_regs, efault_out);
0391     if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
0392         return -EFAULT;
0393     /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
0394     if (v_regs != NULL && (msr & MSR_VEC) != 0) {
0395         unsafe_copy_from_user(&tsk->thread.vr_state, v_regs,
0396                       33 * sizeof(vector128), efault_out);
0397         tsk->thread.used_vr = true;
0398     } else if (tsk->thread.used_vr) {
0399         memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
0400     }
0401     /* Always get VRSAVE back */
0402     if (v_regs != NULL)
0403         unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
0404     else
0405         tsk->thread.vrsave = 0;
0406     if (cpu_has_feature(CPU_FTR_ALTIVEC))
0407         mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
0408 #endif /* CONFIG_ALTIVEC */
0409     /* restore floating point */
0410     unsafe_copy_fpr_from_user(tsk, &sc->fp_regs, efault_out);
0411 #ifdef CONFIG_VSX
0412     /*
0413      * Get additional VSX data. Update v_regs to point after the
0414      * VMX data.  Copy VSX low doubleword from userspace to local
0415      * buffer for formatting, then into the taskstruct.
0416      */
0417     v_regs += ELF_NVRREG;
0418     if ((msr & MSR_VSX) != 0) {
0419         unsafe_copy_vsx_from_user(tsk, v_regs, efault_out);
0420         tsk->thread.used_vsr = true;
0421     } else {
0422         for (i = 0; i < 32 ; i++)
0423             tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
0424     }
0425 #endif
0426     return 0;
0427 
0428 efault_out:
0429     return -EFAULT;
0430 }
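
/*
 * Aside: since sigreturn restores whatever the handler left in the
 * frame, userspace can edit the saved registers before returning. A
 * hedged powerpc64 sketch that skips a faulting instruction (PT_NIP
 * comes from <asm/ptrace.h>; every powerpc instruction is 4 bytes):
 */
#include <signal.h>
#include <ucontext.h>
#include <asm/ptrace.h>

static void skip_insn_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;

	/* rt_sigreturn copies this back into regs->nip, as seen above */
	uc->uc_mcontext.gp_regs[PT_NIP] += 4;
}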
0431 
0432 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0433 /*
0434  * Restore the two sigcontexts from the frame of a transactional process.
0435  */
0436 
0437 static long restore_tm_sigcontexts(struct task_struct *tsk,
0438                    struct sigcontext __user *sc,
0439                    struct sigcontext __user *tm_sc)
0440 {
0441 #ifdef CONFIG_ALTIVEC
0442     elf_vrreg_t __user *v_regs, *tm_v_regs;
0443 #endif
0444     unsigned long err = 0;
0445     unsigned long msr;
0446     struct pt_regs *regs = tsk->thread.regs;
0447 #ifdef CONFIG_VSX
0448     int i;
0449 #endif
0450 
0451     BUG_ON(tsk != current);
0452 
0453     if (tm_suspend_disabled)
0454         return -EINVAL;
0455 
0456     /* copy the GPRs */
0457     err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
0458     err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
0459                 sizeof(regs->gpr));
0460 
0461     /*
0462      * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
0463      * TEXASR was set by the signal delivery reclaim, as was TFIAR.
0464      * Users doing anything abhorrent like thread-switching w/ signals for
0465      * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
0466      * For the case of getting a signal and simply returning from it,
0467      * we don't need to re-copy them here.
0468      */
0469     err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
0470     err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
0471 
0472     /* get MSR separately, transfer the LE bit if doing signal return */
0473     err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
0474     /* Don't allow reserved mode. */
0475     if (MSR_TM_RESV(msr))
0476         return -EINVAL;
0477 
0478     /* pull in MSR LE from user context */
0479     regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
0480 
0481     /* The following non-GPR non-FPR non-VR state is also checkpointed: */
0482     err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
0483     err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
0484     err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
0485     err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
0486     err |= __get_user(tsk->thread.ckpt_regs.ctr,
0487               &sc->gp_regs[PT_CTR]);
0488     err |= __get_user(tsk->thread.ckpt_regs.link,
0489               &sc->gp_regs[PT_LNK]);
0490     err |= __get_user(tsk->thread.ckpt_regs.xer,
0491               &sc->gp_regs[PT_XER]);
0492     err |= __get_user(tsk->thread.ckpt_regs.ccr,
0493               &sc->gp_regs[PT_CCR]);
0494     /* Don't allow userspace to set SOFTE */
0495     set_trap_norestart(regs);
0496     /* These regs are not checkpointed; they can go in 'regs'. */
0497     err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
0498     err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
0499     err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
0500 
0501     /*
0502      * Force reload of FP/VEC.
0503      * This has to be done before copying stuff into tsk->thread.fpr/vr
0504      * for the reasons explained in the previous comment.
0505      */
0506     regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
0507 
0508 #ifdef CONFIG_ALTIVEC
0509     err |= __get_user(v_regs, &sc->v_regs);
0510     err |= __get_user(tm_v_regs, &tm_sc->v_regs);
0511     if (err)
0512         return err;
0513     if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
0514         return -EFAULT;
0515     if (tm_v_regs && !access_ok(tm_v_regs, 34 * sizeof(vector128)))
0516         return -EFAULT;
0517     /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
0518     if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
0519         err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
0520                     33 * sizeof(vector128));
0521         err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
0522                     33 * sizeof(vector128));
0523         current->thread.used_vr = true;
0524     }
0525     else if (tsk->thread.used_vr) {
0526         memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
0527         memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
0528     }
0529     /* Always get VRSAVE back */
0530     if (v_regs != NULL && tm_v_regs != NULL) {
0531         err |= __get_user(tsk->thread.ckvrsave,
0532                   (u32 __user *)&v_regs[33]);
0533         err |= __get_user(tsk->thread.vrsave,
0534                   (u32 __user *)&tm_v_regs[33]);
0535     }
0536     else {
0537         tsk->thread.vrsave = 0;
0538         tsk->thread.ckvrsave = 0;
0539     }
0540     if (cpu_has_feature(CPU_FTR_ALTIVEC))
0541         mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
0542 #endif /* CONFIG_ALTIVEC */
0543     /* restore floating point */
0544     err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
0545     err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
0546 #ifdef CONFIG_VSX
0547     /*
0548      * Get additional VSX data. Update v_regs to point after the
0549      * VMX data.  Copy VSX low doubleword from userspace to local
0550      * buffer for formatting, then into the taskstruct.
0551      */
0552     if (v_regs && ((msr & MSR_VSX) != 0)) {
0553         v_regs += ELF_NVRREG;
0554         tm_v_regs += ELF_NVRREG;
0555         err |= copy_vsx_from_user(tsk, tm_v_regs);
0556         err |= copy_ckvsx_from_user(tsk, v_regs);
0557         tsk->thread.used_vsr = true;
0558     } else {
0559         for (i = 0; i < 32 ; i++) {
0560             tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
0561             tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
0562         }
0563     }
0564 #endif
0565     tm_enable();
0566     /* Make sure the transaction is marked as failed */
0567     tsk->thread.tm_texasr |= TEXASR_FS;
0568 
0569     /*
0570      * Disabling preemption, since it is unsafe to be preempted
0571      * with MSR[TS] set without recheckpointing.
0572      */
0573     preempt_disable();
0574 
0575     /* pull in MSR TS bits from user context */
0576     regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
0577 
0578     /*
0579      * Ensure that TM is enabled in regs->msr before we leave the signal
0580      * handler. It could be the case that (a) user disabled the TM bit
0581      * through the manipulation of the MSR bits in uc_mcontext or (b) the
0582      * TM bit was disabled because a sufficient number of context switches
0583      * happened whilst in the signal handler and load_tm overflowed,
0584      * disabling the TM bit. In either case we can end up with an illegal
0585      * TM state leading to a TM Bad Thing when we return to userspace.
0586      *
0587      * CAUTION:
0588      * After regs->MSR[TS] being updated, make sure that get_user(),
0589      * put_user() or similar functions are *not* called. These
0590      * functions can generate page faults which will cause the process
0591      * to be de-scheduled with MSR[TS] set but without calling
0592      * tm_recheckpoint(). This can cause a bug.
0593      */
0594     regs_set_return_msr(regs, regs->msr | MSR_TM);
0595 
0596     /* This loads the checkpointed FP/VEC state, if used */
0597     tm_recheckpoint(&tsk->thread);
0598 
0599     msr_check_and_set(msr & (MSR_FP | MSR_VEC));
0600     if (msr & MSR_FP) {
0601         load_fp_state(&tsk->thread.fp_state);
0602         regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
0603     }
0604     if (msr & MSR_VEC) {
0605         load_vr_state(&tsk->thread.vr_state);
0606         regs_set_return_msr(regs, regs->msr | MSR_VEC);
0607     }
0608 
0609     preempt_enable();
0610 
0611     return err;
0612 }
0613 #else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
0614 static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc,
0615                    struct sigcontext __user *tm_sc)
0616 {
0617     return -EINVAL;
0618 }
0619 #endif
0620 
0621 /*
0622  * Setup the trampoline code on the stack
0623  */
0624 static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
0625 {
0626     int i;
0627     long err = 0;
0628 
0629     /* Call the handler, then pop the dummy stack frame */
0630     err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
0631     err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);
0632 
0633     err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
0634     err |= __put_user(PPC_RAW_SC(), &tramp[3]);
0635 
0636     /* Minimal traceback info */
0637     for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
0638         err |= __put_user(0, &tramp[i]);
0639 
0640     if (!err)
0641         flush_icache_range((unsigned long) &tramp[0],
0642                (unsigned long) &tramp[TRAMP_SIZE]);
0643 
0644     return err;
0645 }
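
/*
 * Aside: read together with the PPC_RAW_* macros, the words stored
 * above amount to this sequence (mnemonics inferred from the macro
 * names, not quoted from generated code):
 *
 *	bctrl			# call the handler through CTR
 *	addi r1,r1,FRAMESIZE	# pop the dummy caller frame
 *	li   r0,<syscall>	# load e.g. __NR_rt_sigreturn
 *	sc			# enter the kernel
 *
 * The flush_icache_range() call above is needed because the
 * instructions were written through the data side. For run-time
 * generated code in userspace, the analogous step is the GCC/Clang
 * builtin:
 */
static void sync_icache(char *start, char *end)
{
	__builtin___clear_cache(start, end);
}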
0646 
0647 /*
0648  * Userspace code may pass a ucontext which doesn't include VSX added
0649  * at the end.  We need to check for this case.
0650  */
0651 #define UCONTEXTSIZEWITHOUTVSX \
0652         (sizeof(struct ucontext) - 32*sizeof(long))
0653 
0654 /*
0655  * Handle {get,set,swap}_context operations
0656  */
0657 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
0658         struct ucontext __user *, new_ctx, long, ctx_size)
0659 {
0660     sigset_t set;
0661     unsigned long new_msr = 0;
0662     int ctx_has_vsx_region = 0;
0663 
0664     if (new_ctx &&
0665         get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
0666         return -EFAULT;
0667     /*
0668      * Check that the context is not smaller than the original
0669      * size (with VMX but without VSX)
0670      */
0671     if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
0672         return -EINVAL;
0673     /*
0674      * Reject a new context that sets the MSR VSX bits but is too
0675      * small to provide the VSX state.
0676      */
0677     if ((ctx_size < sizeof(struct ucontext)) &&
0678         (new_msr & MSR_VSX))
0679         return -EINVAL;
0680     /* Does the context have enough room to store VSX data? */
0681     if (ctx_size >= sizeof(struct ucontext))
0682         ctx_has_vsx_region = 1;
0683 
0684     if (old_ctx != NULL) {
0685         prepare_setup_sigcontext(current);
0686         if (!user_write_access_begin(old_ctx, ctx_size))
0687             return -EFAULT;
0688 
0689         unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL,
0690                     0, ctx_has_vsx_region, efault_out);
0691         unsafe_copy_to_user(&old_ctx->uc_sigmask, &current->blocked,
0692                     sizeof(sigset_t), efault_out);
0693 
0694         user_write_access_end();
0695     }
0696     if (new_ctx == NULL)
0697         return 0;
0698     if (!access_ok(new_ctx, ctx_size) ||
0699         fault_in_readable((char __user *)new_ctx, ctx_size))
0700         return -EFAULT;
0701 
0702     /*
0703      * If we get a fault copying the context into the kernel's
0704      * image of the user's registers, we can't just return -EFAULT
0705      * because the user's registers will be corrupted.  For instance
0706      * the NIP value may have been updated but not some of the
0707      * other registers.  Given that we have done the access_ok
0708      * and successfully read the first and last bytes of the region
0709      * above, this should only happen in an out-of-memory situation
0710      * or if another thread unmaps the region containing the context.
0711      * We kill the task with a SIGSEGV in this situation.
0712      */
0713 
0714     if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
0715         force_exit_sig(SIGSEGV);
0716         return -EFAULT;
0717     }
0718     set_current_blocked(&set);
0719 
0720     if (!user_read_access_begin(new_ctx, ctx_size))
0721         return -EFAULT;
0722     if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
0723         user_read_access_end();
0724         force_exit_sig(SIGSEGV);
0725         return -EFAULT;
0726     }
0727     user_read_access_end();
0728 
0729     /* This returns like rt_sigreturn */
0730     set_thread_flag(TIF_RESTOREALL);
0731 
0732     return 0;
0733 
0734 efault_out:
0735     user_write_access_end();
0736     return -EFAULT;
0737 }
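
/*
 * Aside: userspace normally reaches context switching like this
 * through the portable <ucontext.h> API (whether glibc backs it with
 * this syscall or with its own register save is a library detail). A
 * minimal, hedged coroutine-style example:
 */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, co_ctx;
static char co_stack[64 * 1024];

static void co_body(void)
{
	puts("in coroutine");	/* returns to main_ctx via uc_link */
}

int main(void)
{
	getcontext(&co_ctx);
	co_ctx.uc_stack.ss_sp = co_stack;
	co_ctx.uc_stack.ss_size = sizeof(co_stack);
	co_ctx.uc_link = &main_ctx;	/* resumed when co_body returns */
	makecontext(&co_ctx, co_body, 0);
	swapcontext(&main_ctx, &co_ctx);	/* run the coroutine */
	puts("back in main");
	return 0;
}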
0738 
0739 
0740 /*
0741  * Do a signal return; undo the signal stack.
0742  */
0743 
0744 SYSCALL_DEFINE0(rt_sigreturn)
0745 {
0746     struct pt_regs *regs = current_pt_regs();
0747     struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
0748     sigset_t set;
0749     unsigned long msr;
0750 
0751     /* Always make any pending restarted system calls return -EINTR */
0752     current->restart_block.fn = do_no_restart_syscall;
0753 
0754     if (!access_ok(uc, sizeof(*uc)))
0755         goto badframe;
0756 
0757     if (__get_user_sigset(&set, &uc->uc_sigmask))
0758         goto badframe;
0759     set_current_blocked(&set);
0760 
0761     if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) {
0762         /*
0763          * If there is a transactional state then throw it away.
0764          * signal frame, and this includes any transactional state
0765          * created within it. We only check for suspended, as we can never
0766          * be active in the kernel; if we somehow are, there is nothing
0767          * better to do than go ahead and take the Bad Thing later.
0768          * do than go ahead and Bad Thing later.
0769          * The cause is not important as there will never be a
0770          * recheckpoint so it's not user visible.
0771          */
0772         if (MSR_TM_SUSPENDED(mfmsr()))
0773             tm_reclaim_current(0);
0774 
0775         /*
0776          * Disable MSR[TS] bit also, so, if there is an exception in the
0777          * code below (as a page fault in copy_ckvsx_to_user()), it does
0778          * not recheckpoint this task if there was a context switch inside
0779          * the exception.
0780          *
0781          * A major page fault can indirectly call schedule(). A reschedule
0782          * in the middle of an exception can have a side effect (changing
0783          * the CPU MSR[TS] state), since schedule() is called with the CPU
0784          * MSR[TS] disabled and returns with MSR[TS]=Suspended
0785          * (switch_to() calls tm_recheckpoint() for the 'new' process). In
0786          * this case, the same process keeps running on the CPU, but the
0787          * CPU MSR[TS] state has changed underneath it.
0788          *
0789          * This can cause a TM Bad Thing, since the MSR in the stack will
0790          * have the MSR[TS]=0, and this is what will be used to RFID.
0791          *
0792          * Clearing the MSR[TS] state here avoids a recheckpoint if there
0793          * is any reschedule in kernel space. The MSR[TS] state does not
0794          * need to be saved either, since it will later be replaced with
0795          * the MSR[TS] that came from the user context, in
0796          * restore_tm_sigcontexts().
0797          */
0798         regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
0799 
0800         if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
0801             goto badframe;
0802     }
0803 
0804     if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) {
0805         /* We recheckpoint on return. */
0806         struct ucontext __user *uc_transact;
0807 
0808         /* Trying to start TM on a non-TM system */
0809         if (!cpu_has_feature(CPU_FTR_TM))
0810             goto badframe;
0811 
0812         if (__get_user(uc_transact, &uc->uc_link))
0813             goto badframe;
0814         if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
0815                        &uc_transact->uc_mcontext))
0816             goto badframe;
0817     } else {
0818         /*
0819          * Fall through, for non-TM restore
0820          *
0821          * Unset MSR[TS] on the thread regs since the MSR from the user
0822          * context does not have TS active, and no recheckpoint was
0823          * done because restore_tm_sigcontexts() was not called
0824          * either.
0825          *
0826          * Without unsetting it, the code could RFID to userspace with
0827          * MSR[TS] set, but without the CPU in the proper state,
0828          * causing a TM Bad Thing.
0829          */
0830         regs_set_return_msr(current->thread.regs,
0831                 current->thread.regs->msr & ~MSR_TS_MASK);
0832         if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
0833             goto badframe;
0834 
0835         unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext,
0836                       badframe_block);
0837 
0838         user_read_access_end();
0839     }
0840 
0841     if (restore_altstack(&uc->uc_stack))
0842         goto badframe;
0843 
0844     set_thread_flag(TIF_RESTOREALL);
0845 
0846     return 0;
0847 
0848 badframe_block:
0849     user_read_access_end();
0850 badframe:
0851     signal_fault(current, regs, "rt_sigreturn", uc);
0852 
0853     force_sig(SIGSEGV);
0854     return 0;
0855 }
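
/*
 * Aside: the blocked-signal mask restored above comes straight from
 * uc_sigmask in the user frame, so a handler may edit it to change
 * the mask that is in force after it returns. A hedged sketch that
 * leaves SIGUSR2 blocked once the handler is done:
 */
#include <signal.h>
#include <ucontext.h>

static void masking_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;

	sigaddset(&uc->uc_sigmask, SIGUSR2);	/* applied by rt_sigreturn */
}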
0856 
0857 int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
0858         struct task_struct *tsk)
0859 {
0860     struct rt_sigframe __user *frame;
0861     unsigned long newsp = 0;
0862     long err = 0;
0863     struct pt_regs *regs = tsk->thread.regs;
0864     /* Save the thread's msr before get_tm_stackpointer() changes it */
0865     unsigned long msr = regs->msr;
0866 
0867     frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);
0868 
0869     /*
0870      * This only applies when calling unsafe_setup_sigcontext() and must be
0871      * called before opening the uaccess window.
0872      */
0873     if (!MSR_TM_ACTIVE(msr))
0874         prepare_setup_sigcontext(tsk);
0875 
0876     if (!user_write_access_begin(frame, sizeof(*frame)))
0877         goto badframe;
0878 
0879     unsafe_put_user(&frame->info, &frame->pinfo, badframe_block);
0880     unsafe_put_user(&frame->uc, &frame->puc, badframe_block);
0881 
0882     /* Create the ucontext.  */
0883     unsafe_put_user(0, &frame->uc.uc_flags, badframe_block);
0884     unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block);
0885 
0886     if (MSR_TM_ACTIVE(msr)) {
0887 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0888         /* The ucontext_t passed to userland points to the second
0889          * ucontext_t (for transactional state) with its uc_link ptr.
0890          */
0891         unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block);
0892 
0893         user_write_access_end();
0894 
0895         err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
0896                         &frame->uc_transact.uc_mcontext,
0897                         tsk, ksig->sig, NULL,
0898                         (unsigned long)ksig->ka.sa.sa_handler,
0899                         msr);
0900 
0901         if (!user_write_access_begin(&frame->uc.uc_sigmask,
0902                          sizeof(frame->uc.uc_sigmask)))
0903             goto badframe;
0904 
0905 #endif
0906     } else {
0907         unsafe_put_user(0, &frame->uc.uc_link, badframe_block);
0908         unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
0909                     NULL, (unsigned long)ksig->ka.sa.sa_handler,
0910                     1, badframe_block);
0911     }
0912 
0913     unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
0914     user_write_access_end();
0915 
0916     /* Save the siginfo outside of the unsafe block. */
0917     if (copy_siginfo_to_user(&frame->info, &ksig->info))
0918         goto badframe;
0919 
0920     /* Make sure signal handler doesn't get spurious FP exceptions */
0921     tsk->thread.fp_state.fpscr = 0;
0922 
0923     /* Set up to return from userspace. */
0924     if (tsk->mm->context.vdso) {
0925         regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
0926     } else {
0927         err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
0928         if (err)
0929             goto badframe;
0930         regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
0931     }
0932 
0933     /* Allocate a dummy caller frame for the signal handler. */
0934     newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
0935     err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
0936 
0937     /* Set up "regs" so we "return" to the signal handler. */
0938     if (is_elf2_task()) {
0939         regs->ctr = (unsigned long) ksig->ka.sa.sa_handler;
0940         regs->gpr[12] = regs->ctr;
0941     } else {
0942         /* Handler is *really* a pointer to the function descriptor for
0943          * the signal routine.  The first entry in the function
0944          * descriptor is the entry address of the handler and the second
0945          * entry is the TOC value we need to use.
0946          */
0947         struct func_desc __user *ptr =
0948             (struct func_desc __user *)ksig->ka.sa.sa_handler;
0949 
0950         err |= get_user(regs->ctr, &ptr->addr);
0951         err |= get_user(regs->gpr[2], &ptr->toc);
0952     }
0953 
0954     /* enter the signal handler in native-endian mode */
0955     regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
0956     regs->gpr[1] = newsp;
0957     regs->gpr[3] = ksig->sig;
0958     regs->result = 0;
0959     if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
0960         regs->gpr[4] = (unsigned long)&frame->info;
0961         regs->gpr[5] = (unsigned long)&frame->uc;
0962         regs->gpr[6] = (unsigned long) frame;
0963     } else {
0964         regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
0965     }
0966     if (err)
0967         goto badframe;
0968 
0969     return 0;
0970 
0971 badframe_block:
0972     user_write_access_end();
0973 badframe:
0974     signal_fault(current, regs, "handle_rt_signal64", frame);
0975 
0976     return 1;
0977 }
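
/*
 * Aside: the register setup above (r3 = signo, r4 = &frame->info,
 * r5 = &frame->uc) is the three-argument SA_SIGINFO convention, so a
 * handler installed as below receives pointers into the rt_sigframe
 * this function just built. A minimal, hedged registration example:
 */
#include <signal.h>
#include <string.h>

static void on_sig(int sig, siginfo_t *info, void *uc)
{
	/* info and uc point into the signal frame on the user stack */
}

static int install(int signo)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sig;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	return sigaction(signo, &sa, NULL);
}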