Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
0004  *
0005  *  PowerPC version
0006  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
0007  * Copyright (C) 2001 IBM
0008  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
0009  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
0010  *
0011  *  Derived from "arch/i386/kernel/signal.c"
0012  *    Copyright (C) 1991, 1992 Linus Torvalds
0013  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
0014  */
0015 
0016 #include <linux/sched.h>
0017 #include <linux/mm.h>
0018 #include <linux/smp.h>
0019 #include <linux/kernel.h>
0020 #include <linux/signal.h>
0021 #include <linux/errno.h>
0022 #include <linux/elf.h>
0023 #include <linux/ptrace.h>
0024 #include <linux/pagemap.h>
0025 #include <linux/ratelimit.h>
0026 #include <linux/syscalls.h>
0027 #ifdef CONFIG_PPC64
0028 #include <linux/compat.h>
0029 #else
0030 #include <linux/wait.h>
0031 #include <linux/unistd.h>
0032 #include <linux/stddef.h>
0033 #include <linux/tty.h>
0034 #include <linux/binfmts.h>
0035 #endif
0036 
0037 #include <linux/uaccess.h>
0038 #include <asm/cacheflush.h>
0039 #include <asm/syscalls.h>
0040 #include <asm/sigcontext.h>
0041 #include <asm/vdso.h>
0042 #include <asm/switch_to.h>
0043 #include <asm/tm.h>
0044 #include <asm/asm-prototypes.h>
0045 #ifdef CONFIG_PPC64
0046 #include "ppc32.h"
0047 #include <asm/unistd.h>
0048 #else
0049 #include <asm/ucontext.h>
0050 #endif
0051 
0052 #include "signal.h"
0053 
0054 
0055 #ifdef CONFIG_PPC64
0056 #define old_sigaction   old_sigaction32
0057 #define sigcontext  sigcontext32
0058 #define mcontext    mcontext32
0059 #define ucontext    ucontext32
0060 
0061 /*
0062  * Userspace code may pass a ucontext which doesn't include VSX added
0063  * at the end.  We need to check for this case.
0064  */
0065 #define UCONTEXTSIZEWITHOUTVSX \
0066         (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
0067 
0068 /*
0069  * Returning 0 means we return to userspace via
0070  * ret_from_except and thus restore all user
0071  * registers from *regs.  This is what we need
0072  * to do when a signal has been delivered.
0073  */
0074 
0075 #define GP_REGS_SIZE    min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
0076 #undef __SIGNAL_FRAMESIZE
0077 #define __SIGNAL_FRAMESIZE  __SIGNAL_FRAMESIZE32
0078 #undef ELF_NVRREG
0079 #define ELF_NVRREG  ELF_NVRREG32
0080 
0081 /*
0082  * Functions for flipping sigsets (thanks to brain dead generic
0083  * implementation that makes things simple for little endian only)
0084  */
0085 #define unsafe_put_sigset_t unsafe_put_compat_sigset
0086 #define unsafe_get_sigset_t unsafe_get_compat_sigset
0087 
0088 #define to_user_ptr(p)      ptr_to_compat(p)
0089 #define from_user_ptr(p)    compat_ptr(p)
0090 
/*
 * Copy the general registers from the 64-bit pt_regs into the 32-bit
 * mcontext, truncating each register to 32 bits.  Must be called
 * inside a user_access_begin()/user_access_end() section.
 *
 * Returns 0 on success, 1 if a user access faulted.
 */
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	for (i = 0; i <= PT_RESULT; i ++) {
		/* Force usr to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		unsafe_put_user(val, &frame->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}
0111 
/*
 * Restore the general registers from a 32-bit mcontext into the 64-bit
 * pt_regs.  MSR and SOFTE are deliberately skipped: MSR is handled
 * separately by the callers and SOFTE is kernel-maintained state that
 * userspace must not overwrite.  Must be called inside a
 * user_read_access_begin()/user_read_access_end() section.
 *
 * Returns 0 on success, 1 if a user access faulted.
 */
static __always_inline int
__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}
0128 
0129 #else /* CONFIG_PPC64 */
0130 
0131 #define GP_REGS_SIZE    min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
0132 
0133 #define unsafe_put_sigset_t(uset, set, label) do {          \
0134     sigset_t __user *__us = uset    ;               \
0135     const sigset_t *__s = set;                  \
0136                                     \
0137     unsafe_copy_to_user(__us, __s, sizeof(*__us), label);       \
0138 } while (0)
0139 
0140 #define unsafe_get_sigset_t unsafe_get_user_sigset
0141 
0142 #define to_user_ptr(p)      ((unsigned long)(p))
0143 #define from_user_ptr(p)    ((void __user *)(p))
0144 
/*
 * Native 32-bit variant: the pt_regs layout matches the mcontext gregs
 * layout, so one bulk copy to userspace suffices.
 *
 * Returns 0 on success, 1 if a user access faulted.
 */
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}
0154 
/*
 * Native 32-bit variant: bulk-restore the general registers from the
 * mcontext, skipping the MSR slot (handled separately by callers).
 *
 * Returns 0 on success, 1 if a user access faulted.
 */
static __always_inline
int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);

	/* copy from orig_r3 (the word after the MSR) up to the end */
	unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			      GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);

	return 0;

failed:
	return 1;
}
0170 #endif
0171 
/*
 * goto-on-failure wrappers: jump to 'label' when the underlying
 * __unsafe_* helper reports a fault (non-zero return), matching the
 * convention of the other unsafe_* user-access macros.
 */
#define unsafe_save_general_regs(regs, frame, label) do {	\
	if (__unsafe_save_general_regs(regs, frame))		\
		goto label;					\
} while (0)

#define unsafe_restore_general_regs(regs, frame, label) do {	\
	if (__unsafe_restore_general_regs(regs, frame))		\
		goto label;					\
} while (0)
0181 
0182 /*
0183  * When we have signals to deliver, we set up on the
0184  * user stack, going down from the original stack pointer:
0185  *  an ABI gap of 56 words
0186  *  an mcontext struct
0187  *  a sigcontext struct
0188  *  a gap of __SIGNAL_FRAMESIZE bytes
0189  *
0190  * Each of these things must be a multiple of 16 bytes in size. The following
0191  * structure represent all of this except the __SIGNAL_FRAMESIZE gap
0192  *
0193  */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;	/* sigcontext for the transactional state */
	struct mcontext	mctx_transact;		/* transactional register values */
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
0207 
0208 /*
0209  *  When we have rt signals to deliver, we set up on the
0210  *  user stack, going down from the original stack pointer:
0211  *  one rt_sigframe struct (siginfo + ucontext + ABI gap)
0212  *  a gap of __SIGNAL_FRAMESIZE+16 bytes
0213  *  (the +16 is to get the siginfo and ucontext in the same
0214  *  positions as in older kernels).
0215  *
0216  *  Each of these things must be a multiple of 16 bytes in size.
0217  *
0218  */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;		/* 32-bit siginfo layout for compat tasks */
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;	/* second context holding the transactional state */
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
0235 
0236 unsigned long get_min_sigframe_size_32(void)
0237 {
0238     return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
0239            sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
0240 }
0241 
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 *
 * This helper only flushes live register state into current->thread;
 * the actual copy to the user stack is done later by
 * __unsafe_save_user_regs() inside a user-access section.
 */
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	/* Capture the live VRSAVE SPR so it can be written out later */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	/* Only flush VSX when the destination context has room for it */
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}
0266 
/*
 * Write the user register state out to an mcontext on the user stack.
 * Caller must be inside user_access_begin()/user_access_end() and must
 * have called prepare_save_user_regs() beforehand to flush live state
 * into current->thread.
 *
 * @regs:     live user registers
 * @frame:    user-space mcontext to fill
 * @tm_frame: optional second mcontext; when non-NULL its MSR word is
 *            zeroed so sigreturn can tell that TM was not active
 * @ctx_has_vsx_region: non-zero if the context has room for VSX state
 *
 * Returns 0 on success, 1 if any user access faulted.
 */
static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
				   struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* save general registers */
	unsafe_save_general_regs(regs, frame, failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
			failed);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	/* Saved MSR is written last, with validity bits patched in above */
	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	/* We need to write 0 the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame)
		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
0343 
/* goto-on-failure wrapper around __unsafe_save_user_regs() */
#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx))	\
		goto label;						\
} while (0)
0348 
0349 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static void prepare_save_tm_user_regs(void)
{
	/* Delivering a TM frame with TM suspend disabled indicates a bug */
	WARN_ON(tm_suspend_disabled);

	/* Capture the live (checkpointed) VRSAVE SPR for the frame */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
}
0366 
/*
 * Save both the checkpointed and the transactional (live) register
 * state to the two mcontexts of a TM signal frame.  Caller must be
 * inside user_access_begin()/user_access_end() and must have called
 * prepare_save_tm_user_regs() first.
 *
 * @frame:    mcontext receiving the checkpointed state
 * @tm_frame: mcontext receiving the transactional state
 * @msr:      MSR value sampled before get_tm_stackpointer() changed it
 *
 * Returns 0 on success, 1 if a user access faulted.
 */
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	/* Save both sets of general registers */
	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
	unsafe_save_general_regs(regs, tm_frame, failed);

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);

	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* MSR_VEC tells whether the live VMX state is valid; if not,
		 * the transactional frame gets the checkpointed copy too */
		if (msr & MSR_VEC)
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.vr_state,
					    ELF_NVRREG * sizeof(vector128), failed);
		else
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.ckvr_state,
					    ELF_NVRREG * sizeof(vector128), failed);

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	unsafe_put_user(current->thread.ckvrsave,
			(u32 __user *)&frame->mc_vregs[32], failed);
	if (msr & MSR_VEC)
		unsafe_put_user(current->thread.vrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
	else
		unsafe_put_user(current->thread.ckvrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);

	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
	if (msr & MSR_FP)
		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
	else
		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);

	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
		if (msr & MSR_VSX)
			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
		else
			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);

		msr |= MSR_VSX;
	}

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
0444 #else
/* Stubs for !CONFIG_PPC_TRANSACTIONAL_MEM: there is no TM state to save */
static void prepare_save_tm_user_regs(void) { }

static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
0452 #endif
0453 
/* goto-on-failure wrapper around save_tm_user_regs_unsafe() */
#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
		goto label;						\
} while (0)
0458 
/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 *
 * @regs: kernel pt_regs to populate
 * @sr:   user-space mcontext to read from
 * @sig:  non-zero when returning from a signal handler (sigreturn);
 *        zero for swapcontext-style restores, in which case r2 (TLS)
 *        is preserved and the endian mode is not touched
 *
 * Returns 0 on success, 1 on any user-access failure.
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	unsafe_restore_general_regs(regs, sr, failed);
	set_trap_norestart(regs);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instruction.
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

#ifdef CONFIG_SPE
	/*
	 * Force the process to reload the spe registers from
	 * current->thread when it next does spe instructions.
	 * Since this is user ABI, we must enforce the sizing.
	 */
	BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
	regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
				      sizeof(current->thread.spe), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(&current->thread.spe, 0, sizeof(current->thread.spe));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();
	return 0;

failed:
	user_read_access_end();
	return 1;
}
0562 
0563 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 *
 * @sr:    user mcontext holding the checkpointed state
 * @tm_sr: user mcontext holding the transactional (speculative) state
 *
 * Returns 0 on success; 1 on a user-access fault, when TM suspend is
 * disabled, or when the user-supplied MSR carries a reserved TM state.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	unsigned long msr, msr_hi;
	int i;

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;

	unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
	unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);

	/* Restore the previous little-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.ckvrsave,
			(u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);

	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}

	user_read_access_end();

	/* Now read the transactional (live) state from the second mcontext */
	if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
		return 1;

	unsafe_restore_general_regs(regs, tm_sr, failed);

	/* restore altivec registers from the stack */
	if (msr & MSR_VEC)
		/* sr->mc_vregs and tm_sr->mc_vregs have the same size */
		unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave,
			(u32 __user *)&tm_sr->mc_vregs[32], failed);

	unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);

	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	}

	/* Get the top half of the MSR from the user context */
	unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
	msr_hi <<= 32;

	user_read_access_end();

	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
	}
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs_set_return_msr(regs, regs->msr | MSR_VEC);
	}

	preempt_enable();

	return 0;

failed:
	user_read_access_end();
	return 1;
}
0715 #else
/* Stub for !CONFIG_PPC_TRANSACTIONAL_MEM: nothing to restore, report success */
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	return 0;
}
0721 #endif
0722 
0723 #ifdef CONFIG_PPC64
0724 
0725 #define copy_siginfo_to_user    copy_siginfo_to_user32
0726 
0727 #endif /* CONFIG_PPC64 */
0728 
/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 *
 * Builds a struct rt_sigframe on the user stack: siginfo, ucontext
 * (plus a second transactional ucontext when TM is active), the saved
 * user registers, and the sigreturn trampoline (vDSO if mapped,
 * otherwise two instructions written into mc_pad).  Finally pushes a
 * back-chain stack frame and loads the registers for handler entry.
 *
 * Returns 0 on success, 1 on any user-access failure.
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	/* Flush live register state into current->thread first */
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Set up the sigreturn trampoline: vDSO if mapped, else on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		/* Flush the trampoline to memory and invalidate the icache */
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}
0828 
/*
 * OK, we're invoking a handler: set up a non-RT ("old style") signal
 * frame on the user stack and point the task at the handler.
 *
 * @ksig:   signal number, handler and delivery info
 * @oldset: signal mask to be restored on sigreturn (stored in the frame)
 * @tsk:    task the signal is delivered to
 *
 * Returns 0 on success, 1 if the user-stack frame could not be written
 * (the caller then raises SIGSEGV via signal_fault()).
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	/* Flush live register state to the thread struct before copy-out */
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	/* Open one user-access window covering the whole signal frame */
	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
	/* High 32 bits of the 64-bit sigmask go in the _unused slot */
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	if (tsk->mm->context.vdso) {
		/* Return from the handler via the vDSO sigreturn trampoline */
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		/* No vDSO: write "li r0,__NR_sigreturn; sc" into the frame */
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		/* Make the just-written instructions visible to ifetch */
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	user_access_end();

	/* Handler "returns" to the trampoline, which issues sigreturn */
	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* r3 = signal number, r4 = &sigcontext, per the old-style ABI */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	/* An unsafe_put_user() faulted inside the user-access window */
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}
0916 
/*
 * Restore the blocked-signal mask and the user register state from a
 * 32-bit ucontext.  @sig is passed through to restore_user_regs();
 * non-zero means we are returning from a signal handler.
 *
 * Returns 0 on success or -EFAULT if any user access faults.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
	{
		/* uc_regs holds a 32-bit user pointer; widen it to 64 bits */
		u32 cmcp;

		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
		mcp = (struct mcontext __user *)(u64)cmcp;
	}
#else
	unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
	user_read_access_end();

	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;

failed:
	/* A fault occurred inside the user-read-access window */
	user_read_access_end();
	return -EFAULT;
}
0948 
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * TM flavour of do_setcontext(): restore the blocked-signal mask plus
 * both the checkpointed (@ucp) and transactional (@tm_ucp) register
 * images via restore_tm_user_regs().
 *
 * Returns 0 on success or -EFAULT if any user access faults.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
	unsafe_get_user(cmcp, &ucp->uc_regs, failed);

	user_read_access_end();

	/* tm_ucp is read outside the window, with an ordinary __get_user() */
	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;

failed:
	/* A fault occurred inside the user-read-access window */
	user_read_access_end();
	return -EFAULT;
}
#endif
0985 
/*
 * swapcontext syscall: optionally save the current user context into
 * @old_ctx, and if @new_ctx is non-NULL switch to the context it
 * describes.  @ctx_size lets userspace pass a ucontext without the
 * later-added VSX region (see UCONTEXTSIZEWITHOUTVSX).
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		prepare_save_user_regs(ctx_has_vsx_region);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;
		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	/* Fault the whole new context in up front so later reads rarely fail */
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_readable((char __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0)) {
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

failed:
	/* Fault while filling in old_ctx inside the write-access window */
	user_write_access_end();
	return -EFAULT;
}
1087 
/*
 * rt_sigreturn syscall: tear down the rt signal frame built by
 * handle_rt_signal32(), restoring the signal mask, register state,
 * altstack and (if present) transactional state.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* The frame sits just above the stack frame pushed for the handler */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended as we can never be
	 * active in the kernel; if we are active, there is nothing better
	 * to do than go ahead and Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	/* uc_link points at the transactional ucontext, if there is one */
	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're
			 * transactional.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad thing at RFID
		 */
		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}
1188 
#ifdef CONFIG_PPC32
/*
 * debug_setcontext syscall (PPC32 only): apply a list of @ndbg debug
 * operations from @dbg (single-stepping / branch tracing), then restore
 * the supplied context @ctx, so a debugger can change debug state and
 * resume the task in one step.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
		     int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	/* First pass: validate every op and accumulate the new settings */
	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* Only drop MSR_DE/IDM if no other debug
				 * events remain active */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* branch tracing not supported with advanced
			 * debug registers */
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	/* Fault the whole context in up front so later reads rarely fail */
	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_readable((char __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
1287 
/*
 * Do a signal return; undo the signal stack.
 *
 * Counterpart of handle_signal32(): tear down the old-style sigframe
 * located at sp + __SIGNAL_FRAMESIZE, restoring the signal mask and
 * registers (including transactional state if TM was active).
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp = NULL;
	unsigned long long msr_hi = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

	mcp = (struct mcontext __user *)&sf->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Peek at the transactional MSR to see if TM state must be restored */
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
#endif
	/* Without TM, msr_hi stays 0 and this branch is never taken */
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else {
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		if (restore_user_regs(regs, sr, 1)) {
			signal_fault(current, regs, "sys_sigreturn", sr);

			force_sig(SIGSEGV);
			return 0;
		}
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "sys_sigreturn", sc);

	force_sig(SIGSEGV);
	return 0;
}