0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016 #include <linux/sched.h>
0017 #include <linux/mm.h>
0018 #include <linux/smp.h>
0019 #include <linux/kernel.h>
0020 #include <linux/signal.h>
0021 #include <linux/errno.h>
0022 #include <linux/elf.h>
0023 #include <linux/ptrace.h>
0024 #include <linux/pagemap.h>
0025 #include <linux/ratelimit.h>
0026 #include <linux/syscalls.h>
0027 #ifdef CONFIG_PPC64
0028 #include <linux/compat.h>
0029 #else
0030 #include <linux/wait.h>
0031 #include <linux/unistd.h>
0032 #include <linux/stddef.h>
0033 #include <linux/tty.h>
0034 #include <linux/binfmts.h>
0035 #endif
0036
0037 #include <linux/uaccess.h>
0038 #include <asm/cacheflush.h>
0039 #include <asm/syscalls.h>
0040 #include <asm/sigcontext.h>
0041 #include <asm/vdso.h>
0042 #include <asm/switch_to.h>
0043 #include <asm/tm.h>
0044 #include <asm/asm-prototypes.h>
0045 #ifdef CONFIG_PPC64
0046 #include "ppc32.h"
0047 #include <asm/unistd.h>
0048 #else
0049 #include <asm/ucontext.h>
0050 #endif
0051
0052 #include "signal.h"
0053
0054
0055 #ifdef CONFIG_PPC64
0056 #define old_sigaction old_sigaction32
0057 #define sigcontext sigcontext32
0058 #define mcontext mcontext32
0059 #define ucontext ucontext32
0060
0061
0062
0063
0064
0065 #define UCONTEXTSIZEWITHOUTVSX \
0066 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
0067
0068
0069
0070
0071
0072
0073
0074
0075 #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
0076 #undef __SIGNAL_FRAMESIZE
0077 #define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
0078 #undef ELF_NVRREG
0079 #define ELF_NVRREG ELF_NVRREG32
0080
0081
0082
0083
0084
0085 #define unsafe_put_sigset_t unsafe_put_compat_sigset
0086 #define unsafe_get_sigset_t unsafe_get_compat_sigset
0087
0088 #define to_user_ptr(p) ptr_to_compat(p)
0089 #define from_user_ptr(p) compat_ptr(p)
0090
0091 static __always_inline int
0092 __unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
0093 {
0094 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
0095 int val, i;
0096
0097 for (i = 0; i <= PT_RESULT; i ++) {
0098
0099 if (i == PT_SOFTE)
0100 val = 1;
0101 else
0102 val = gregs[i];
0103
0104 unsafe_put_user(val, &frame->mc_gregs[i], failed);
0105 }
0106 return 0;
0107
0108 failed:
0109 return 1;
0110 }
0111
0112 static __always_inline int
0113 __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
0114 {
0115 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
0116 int i;
0117
0118 for (i = 0; i <= PT_RESULT; i++) {
0119 if ((i == PT_MSR) || (i == PT_SOFTE))
0120 continue;
0121 unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
0122 }
0123 return 0;
0124
0125 failed:
0126 return 1;
0127 }
0128
0129 #else
0130
0131 #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
0132
0133 #define unsafe_put_sigset_t(uset, set, label) do { \
0134 sigset_t __user *__us = uset ; \
0135 const sigset_t *__s = set; \
0136 \
0137 unsafe_copy_to_user(__us, __s, sizeof(*__us), label); \
0138 } while (0)
0139
0140 #define unsafe_get_sigset_t unsafe_get_user_sigset
0141
0142 #define to_user_ptr(p) ((unsigned long)(p))
0143 #define from_user_ptr(p) ((void __user *)(p))
0144
0145 static __always_inline int
0146 __unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
0147 {
0148 unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
0149 return 0;
0150
0151 failed:
0152 return 1;
0153 }
0154
0155 static __always_inline
0156 int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
0157 {
0158
0159 unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);
0160
0161
0162 unsafe_copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
0163 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);
0164
0165 return 0;
0166
0167 failed:
0168 return 1;
0169 }
0170 #endif
0171
0172 #define unsafe_save_general_regs(regs, frame, label) do { \
0173 if (__unsafe_save_general_regs(regs, frame)) \
0174 goto label; \
0175 } while (0)
0176
0177 #define unsafe_restore_general_regs(regs, frame, label) do { \
0178 if (__unsafe_restore_general_regs(regs, frame)) \
0179 goto label; \
0180 } while (0)
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194 struct sigframe {
0195 struct sigcontext sctx;
0196 struct mcontext mctx;
0197 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0198 struct sigcontext sctx_transact;
0199 struct mcontext mctx_transact;
0200 #endif
0201
0202
0203
0204
0205 int abigap[56];
0206 };
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219 struct rt_sigframe {
0220 #ifdef CONFIG_PPC64
0221 compat_siginfo_t info;
0222 #else
0223 struct siginfo info;
0224 #endif
0225 struct ucontext uc;
0226 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0227 struct ucontext uc_transact;
0228 #endif
0229
0230
0231
0232
0233 int abigap[56];
0234 };
0235
0236 unsigned long get_min_sigframe_size_32(void)
0237 {
0238 return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
0239 sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
0240 }
0241
0242
0243
0244
0245
0246
0247 static void prepare_save_user_regs(int ctx_has_vsx_region)
0248 {
0249
0250 flush_fp_to_thread(current);
0251 #ifdef CONFIG_ALTIVEC
0252 if (current->thread.used_vr)
0253 flush_altivec_to_thread(current);
0254 if (cpu_has_feature(CPU_FTR_ALTIVEC))
0255 current->thread.vrsave = mfspr(SPRN_VRSAVE);
0256 #endif
0257 #ifdef CONFIG_VSX
0258 if (current->thread.used_vsr && ctx_has_vsx_region)
0259 flush_vsx_to_thread(current);
0260 #endif
0261 #ifdef CONFIG_SPE
0262 if (current->thread.used_spe)
0263 flush_spe_to_thread(current);
0264 #endif
0265 }
0266
0267 static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
0268 struct mcontext __user *tm_frame, int ctx_has_vsx_region)
0269 {
0270 unsigned long msr = regs->msr;
0271
0272
0273 unsafe_save_general_regs(regs, frame, failed);
0274
0275 #ifdef CONFIG_ALTIVEC
0276
0277 if (current->thread.used_vr) {
0278 unsafe_copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
0279 ELF_NVRREG * sizeof(vector128), failed);
0280
0281
0282 msr |= MSR_VEC;
0283 }
0284
0285
0286
0287
0288
0289
0290
0291
0292 unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
0293 failed);
0294 #endif
0295 unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);
0296
0297
0298
0299
0300
0301 msr &= ~MSR_VSX;
0302 #ifdef CONFIG_VSX
0303
0304
0305
0306
0307
0308
0309 if (current->thread.used_vsr && ctx_has_vsx_region) {
0310 unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
0311 msr |= MSR_VSX;
0312 }
0313 #endif
0314 #ifdef CONFIG_SPE
0315
0316 if (current->thread.used_spe) {
0317 unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
0318 ELF_NEVRREG * sizeof(u32), failed);
0319
0320
0321 msr |= MSR_SPE;
0322 }
0323
0324
0325
0326 unsafe_put_user(current->thread.spefscr,
0327 (u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
0328 #endif
0329
0330 unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
0331
0332
0333
0334
0335 if (tm_frame)
0336 unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);
0337
0338 return 0;
0339
0340 failed:
0341 return 1;
0342 }
0343
0344 #define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
0345 if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx)) \
0346 goto label; \
0347 } while (0)
0348
0349 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359 static void prepare_save_tm_user_regs(void)
0360 {
0361 WARN_ON(tm_suspend_disabled);
0362
0363 if (cpu_has_feature(CPU_FTR_ALTIVEC))
0364 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
0365 }
0366
0367 static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
0368 struct mcontext __user *tm_frame, unsigned long msr)
0369 {
0370
0371 unsafe_save_general_regs(¤t->thread.ckpt_regs, frame, failed);
0372 unsafe_save_general_regs(regs, tm_frame, failed);
0373
0374
0375
0376
0377
0378
0379
0380 unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
0381
0382
0383 if (current->thread.used_vr) {
0384 unsafe_copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state,
0385 ELF_NVRREG * sizeof(vector128), failed);
0386 if (msr & MSR_VEC)
0387 unsafe_copy_to_user(&tm_frame->mc_vregs,
0388 ¤t->thread.vr_state,
0389 ELF_NVRREG * sizeof(vector128), failed);
0390 else
0391 unsafe_copy_to_user(&tm_frame->mc_vregs,
0392 ¤t->thread.ckvr_state,
0393 ELF_NVRREG * sizeof(vector128), failed);
0394
0395
0396
0397
0398 msr |= MSR_VEC;
0399 }
0400
0401
0402
0403
0404
0405
0406 unsafe_put_user(current->thread.ckvrsave,
0407 (u32 __user *)&frame->mc_vregs[32], failed);
0408 if (msr & MSR_VEC)
0409 unsafe_put_user(current->thread.vrsave,
0410 (u32 __user *)&tm_frame->mc_vregs[32], failed);
0411 else
0412 unsafe_put_user(current->thread.ckvrsave,
0413 (u32 __user *)&tm_frame->mc_vregs[32], failed);
0414
0415 unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
0416 if (msr & MSR_FP)
0417 unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
0418 else
0419 unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
0420
0421
0422
0423
0424
0425
0426
0427 if (current->thread.used_vsr) {
0428 unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
0429 if (msr & MSR_VSX)
0430 unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
0431 else
0432 unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);
0433
0434 msr |= MSR_VSX;
0435 }
0436
0437 unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
0438
0439 return 0;
0440
0441 failed:
0442 return 1;
0443 }
0444 #else
0445 static void prepare_save_tm_user_regs(void) { }
0446
0447 static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
0448 struct mcontext __user *tm_frame, unsigned long msr)
0449 {
0450 return 0;
0451 }
0452 #endif
0453
0454 #define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
0455 if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr)) \
0456 goto label; \
0457 } while (0)
0458
0459
0460
0461
0462
0463 static long restore_user_regs(struct pt_regs *regs,
0464 struct mcontext __user *sr, int sig)
0465 {
0466 unsigned int save_r2 = 0;
0467 unsigned long msr;
0468 #ifdef CONFIG_VSX
0469 int i;
0470 #endif
0471
0472 if (!user_read_access_begin(sr, sizeof(*sr)))
0473 return 1;
0474
0475
0476
0477
0478 if (!sig)
0479 save_r2 = (unsigned int)regs->gpr[2];
0480 unsafe_restore_general_regs(regs, sr, failed);
0481 set_trap_norestart(regs);
0482 unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
0483 if (!sig)
0484 regs->gpr[2] = (unsigned long) save_r2;
0485
0486
0487 if (sig)
0488 regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
0489
0490 #ifdef CONFIG_ALTIVEC
0491
0492
0493
0494
0495 regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
0496 if (msr & MSR_VEC) {
0497
0498 unsafe_copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
0499 sizeof(sr->mc_vregs), failed);
0500 current->thread.used_vr = true;
0501 } else if (current->thread.used_vr)
0502 memset(¤t->thread.vr_state, 0,
0503 ELF_NVRREG * sizeof(vector128));
0504
0505
0506 unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
0507 if (cpu_has_feature(CPU_FTR_ALTIVEC))
0508 mtspr(SPRN_VRSAVE, current->thread.vrsave);
0509 #endif
0510 unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
0511
0512 #ifdef CONFIG_VSX
0513
0514
0515
0516
0517 regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
0518 if (msr & MSR_VSX) {
0519
0520
0521
0522
0523 unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
0524 current->thread.used_vsr = true;
0525 } else if (current->thread.used_vsr)
0526 for (i = 0; i < 32 ; i++)
0527 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
0528 #endif
0529
0530
0531
0532
0533 regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
0534
0535 #ifdef CONFIG_SPE
0536
0537
0538
0539
0540
0541 BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
0542 regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
0543 if (msr & MSR_SPE) {
0544
0545 unsafe_copy_from_user(¤t->thread.spe, &sr->mc_vregs,
0546 sizeof(current->thread.spe), failed);
0547 current->thread.used_spe = true;
0548 } else if (current->thread.used_spe)
0549 memset(¤t->thread.spe, 0, sizeof(current->thread.spe));
0550
0551
0552 unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
0553 #endif
0554
0555 user_read_access_end();
0556 return 0;
0557
0558 failed:
0559 user_read_access_end();
0560 return 1;
0561 }
0562
0563 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0564
0565
0566
0567
0568
0569 static long restore_tm_user_regs(struct pt_regs *regs,
0570 struct mcontext __user *sr,
0571 struct mcontext __user *tm_sr)
0572 {
0573 unsigned long msr, msr_hi;
0574 int i;
0575
0576 if (tm_suspend_disabled)
0577 return 1;
0578
0579
0580
0581
0582
0583
0584
0585 if (!user_read_access_begin(sr, sizeof(*sr)))
0586 return 1;
0587
0588 unsafe_restore_general_regs(¤t->thread.ckpt_regs, sr, failed);
0589 unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
0590 unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
0591
0592
0593 regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
0594
0595 regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
0596 if (msr & MSR_VEC) {
0597
0598 unsafe_copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
0599 sizeof(sr->mc_vregs), failed);
0600 current->thread.used_vr = true;
0601 } else if (current->thread.used_vr) {
0602 memset(¤t->thread.vr_state, 0,
0603 ELF_NVRREG * sizeof(vector128));
0604 memset(¤t->thread.ckvr_state, 0,
0605 ELF_NVRREG * sizeof(vector128));
0606 }
0607
0608
0609 unsafe_get_user(current->thread.ckvrsave,
0610 (u32 __user *)&sr->mc_vregs[32], failed);
0611 if (cpu_has_feature(CPU_FTR_ALTIVEC))
0612 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
0613
0614 regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
0615
0616 unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
0617
0618 regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
0619 if (msr & MSR_VSX) {
0620
0621
0622
0623
0624 unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
0625 current->thread.used_vsr = true;
0626 } else if (current->thread.used_vsr)
0627 for (i = 0; i < 32 ; i++) {
0628 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
0629 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
0630 }
0631
0632 user_read_access_end();
0633
0634 if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
0635 return 1;
0636
0637 unsafe_restore_general_regs(regs, tm_sr, failed);
0638
0639
0640 if (msr & MSR_VEC)
0641 unsafe_copy_from_user(¤t->thread.vr_state, &tm_sr->mc_vregs,
0642 sizeof(sr->mc_vregs), failed);
0643
0644
0645 unsafe_get_user(current->thread.vrsave,
0646 (u32 __user *)&tm_sr->mc_vregs[32], failed);
0647
0648 unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
0649
0650 if (msr & MSR_VSX) {
0651
0652
0653
0654
0655 unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
0656 current->thread.used_vsr = true;
0657 }
0658
0659
0660 unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
0661 msr_hi <<= 32;
0662
0663 user_read_access_end();
0664
0665
0666 if (MSR_TM_RESV(msr_hi))
0667 return 1;
0668
0669
0670
0671
0672
0673 preempt_disable();
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685 regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
0686
0687
0688
0689
0690 tm_enable();
0691
0692 current->thread.tm_texasr |= TEXASR_FS;
0693
0694 tm_recheckpoint(¤t->thread);
0695
0696
0697 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
0698 if (msr & MSR_FP) {
0699 load_fp_state(¤t->thread.fp_state);
0700 regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
0701 }
0702 if (msr & MSR_VEC) {
0703 load_vr_state(¤t->thread.vr_state);
0704 regs_set_return_msr(regs, regs->msr | MSR_VEC);
0705 }
0706
0707 preempt_enable();
0708
0709 return 0;
0710
0711 failed:
0712 user_read_access_end();
0713 return 1;
0714 }
0715 #else
0716 static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
0717 struct mcontext __user *tm_sr)
0718 {
0719 return 0;
0720 }
0721 #endif
0722
0723 #ifdef CONFIG_PPC64
0724
0725 #define copy_siginfo_to_user copy_siginfo_to_user32
0726
0727 #endif
0728
0729
0730
0731
0732
0733 int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
0734 struct task_struct *tsk)
0735 {
0736 struct rt_sigframe __user *frame;
0737 struct mcontext __user *mctx;
0738 struct mcontext __user *tm_mctx = NULL;
0739 unsigned long newsp = 0;
0740 unsigned long tramp;
0741 struct pt_regs *regs = tsk->thread.regs;
0742
0743 unsigned long msr = regs->msr;
0744
0745
0746 frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
0747 mctx = &frame->uc.uc_mcontext;
0748 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0749 tm_mctx = &frame->uc_transact.uc_mcontext;
0750 #endif
0751 if (MSR_TM_ACTIVE(msr))
0752 prepare_save_tm_user_regs();
0753 else
0754 prepare_save_user_regs(1);
0755
0756 if (!user_access_begin(frame, sizeof(*frame)))
0757 goto badframe;
0758
0759
0760 unsafe_put_user(0, &frame->uc.uc_flags, failed);
0761 #ifdef CONFIG_PPC64
0762 unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
0763 #else
0764 unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
0765 #endif
0766 unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);
0767
0768 if (MSR_TM_ACTIVE(msr)) {
0769 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0770 unsafe_put_user((unsigned long)&frame->uc_transact,
0771 &frame->uc.uc_link, failed);
0772 unsafe_put_user((unsigned long)tm_mctx,
0773 &frame->uc_transact.uc_regs, failed);
0774 #endif
0775 unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
0776 } else {
0777 unsafe_put_user(0, &frame->uc.uc_link, failed);
0778 unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
0779 }
0780
0781
0782 if (tsk->mm->context.vdso) {
0783 tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
0784 } else {
0785 tramp = (unsigned long)mctx->mc_pad;
0786 unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
0787 unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
0788 asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
0789 }
0790 unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
0791
0792 user_access_end();
0793
0794 if (copy_siginfo_to_user(&frame->info, &ksig->info))
0795 goto badframe;
0796
0797 regs->link = tramp;
0798
0799 #ifdef CONFIG_PPC_FPU_REGS
0800 tsk->thread.fp_state.fpscr = 0;
0801 #endif
0802
0803
0804 newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
0805 if (put_user(regs->gpr[1], (u32 __user *)newsp))
0806 goto badframe;
0807
0808
0809 regs->gpr[1] = newsp;
0810 regs->gpr[3] = ksig->sig;
0811 regs->gpr[4] = (unsigned long)&frame->info;
0812 regs->gpr[5] = (unsigned long)&frame->uc;
0813 regs->gpr[6] = (unsigned long)frame;
0814 regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
0815
0816 regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
0817
0818 return 0;
0819
0820 failed:
0821 user_access_end();
0822
0823 badframe:
0824 signal_fault(tsk, regs, "handle_rt_signal32", frame);
0825
0826 return 1;
0827 }
0828
0829
0830
0831
0832 int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
0833 struct task_struct *tsk)
0834 {
0835 struct sigcontext __user *sc;
0836 struct sigframe __user *frame;
0837 struct mcontext __user *mctx;
0838 struct mcontext __user *tm_mctx = NULL;
0839 unsigned long newsp = 0;
0840 unsigned long tramp;
0841 struct pt_regs *regs = tsk->thread.regs;
0842
0843 unsigned long msr = regs->msr;
0844
0845
0846 frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
0847 mctx = &frame->mctx;
0848 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0849 tm_mctx = &frame->mctx_transact;
0850 #endif
0851 if (MSR_TM_ACTIVE(msr))
0852 prepare_save_tm_user_regs();
0853 else
0854 prepare_save_user_regs(1);
0855
0856 if (!user_access_begin(frame, sizeof(*frame)))
0857 goto badframe;
0858 sc = (struct sigcontext __user *) &frame->sctx;
0859
0860 #if _NSIG != 64
0861 #error "Please adjust handle_signal()"
0862 #endif
0863 unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
0864 unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
0865 #ifdef CONFIG_PPC64
0866 unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
0867 #else
0868 unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
0869 #endif
0870 unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
0871 unsafe_put_user(ksig->sig, &sc->signal, failed);
0872
0873 if (MSR_TM_ACTIVE(msr))
0874 unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
0875 else
0876 unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
0877
0878 if (tsk->mm->context.vdso) {
0879 tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
0880 } else {
0881 tramp = (unsigned long)mctx->mc_pad;
0882 unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
0883 unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
0884 asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
0885 }
0886 user_access_end();
0887
0888 regs->link = tramp;
0889
0890 #ifdef CONFIG_PPC_FPU_REGS
0891 tsk->thread.fp_state.fpscr = 0;
0892 #endif
0893
0894
0895 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
0896 if (put_user(regs->gpr[1], (u32 __user *)newsp))
0897 goto badframe;
0898
0899 regs->gpr[1] = newsp;
0900 regs->gpr[3] = ksig->sig;
0901 regs->gpr[4] = (unsigned long) sc;
0902 regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
0903
0904 regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
0905
0906 return 0;
0907
0908 failed:
0909 user_access_end();
0910
0911 badframe:
0912 signal_fault(tsk, regs, "handle_signal32", frame);
0913
0914 return 1;
0915 }
0916
0917 static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
0918 {
0919 sigset_t set;
0920 struct mcontext __user *mcp;
0921
0922 if (!user_read_access_begin(ucp, sizeof(*ucp)))
0923 return -EFAULT;
0924
0925 unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
0926 #ifdef CONFIG_PPC64
0927 {
0928 u32 cmcp;
0929
0930 unsafe_get_user(cmcp, &ucp->uc_regs, failed);
0931 mcp = (struct mcontext __user *)(u64)cmcp;
0932 }
0933 #else
0934 unsafe_get_user(mcp, &ucp->uc_regs, failed);
0935 #endif
0936 user_read_access_end();
0937
0938 set_current_blocked(&set);
0939 if (restore_user_regs(regs, mcp, sig))
0940 return -EFAULT;
0941
0942 return 0;
0943
0944 failed:
0945 user_read_access_end();
0946 return -EFAULT;
0947 }
0948
0949 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0950 static int do_setcontext_tm(struct ucontext __user *ucp,
0951 struct ucontext __user *tm_ucp,
0952 struct pt_regs *regs)
0953 {
0954 sigset_t set;
0955 struct mcontext __user *mcp;
0956 struct mcontext __user *tm_mcp;
0957 u32 cmcp;
0958 u32 tm_cmcp;
0959
0960 if (!user_read_access_begin(ucp, sizeof(*ucp)))
0961 return -EFAULT;
0962
0963 unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
0964 unsafe_get_user(cmcp, &ucp->uc_regs, failed);
0965
0966 user_read_access_end();
0967
0968 if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
0969 return -EFAULT;
0970 mcp = (struct mcontext __user *)(u64)cmcp;
0971 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
0972
0973
0974 set_current_blocked(&set);
0975 if (restore_tm_user_regs(regs, mcp, tm_mcp))
0976 return -EFAULT;
0977
0978 return 0;
0979
0980 failed:
0981 user_read_access_end();
0982 return -EFAULT;
0983 }
0984 #endif
0985
0986 #ifdef CONFIG_PPC64
0987 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
0988 struct ucontext __user *, new_ctx, int, ctx_size)
0989 #else
0990 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
0991 struct ucontext __user *, new_ctx, long, ctx_size)
0992 #endif
0993 {
0994 struct pt_regs *regs = current_pt_regs();
0995 int ctx_has_vsx_region = 0;
0996
0997 #ifdef CONFIG_PPC64
0998 unsigned long new_msr = 0;
0999
1000 if (new_ctx) {
1001 struct mcontext __user *mcp;
1002 u32 cmcp;
1003
1004
1005
1006
1007
1008
1009 if (__get_user(cmcp, &new_ctx->uc_regs))
1010 return -EFAULT;
1011 mcp = (struct mcontext __user *)(u64)cmcp;
1012 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1013 return -EFAULT;
1014 }
1015
1016
1017
1018
1019 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1020 return -EINVAL;
1021
1022
1023
1024
1025 if ((ctx_size < sizeof(struct ucontext)) &&
1026 (new_msr & MSR_VSX))
1027 return -EINVAL;
1028
1029 if (ctx_size >= sizeof(struct ucontext))
1030 ctx_has_vsx_region = 1;
1031 #else
1032
1033
1034
1035 if (ctx_size < sizeof(struct ucontext))
1036 return -EINVAL;
1037 #endif
1038 if (old_ctx != NULL) {
1039 struct mcontext __user *mctx;
1040
1041
1042
1043
1044
1045
1046
1047
1048 mctx = (struct mcontext __user *)
1049 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1050 prepare_save_user_regs(ctx_has_vsx_region);
1051 if (!user_write_access_begin(old_ctx, ctx_size))
1052 return -EFAULT;
1053 unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
1054 unsafe_put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked, failed);
1055 unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
1056 user_write_access_end();
1057 }
1058 if (new_ctx == NULL)
1059 return 0;
1060 if (!access_ok(new_ctx, ctx_size) ||
1061 fault_in_readable((char __user *)new_ctx, ctx_size))
1062 return -EFAULT;
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075 if (do_setcontext(new_ctx, regs, 0)) {
1076 force_exit_sig(SIGSEGV);
1077 return -EFAULT;
1078 }
1079
1080 set_thread_flag(TIF_RESTOREALL);
1081 return 0;
1082
1083 failed:
1084 user_write_access_end();
1085 return -EFAULT;
1086 }
1087
1088 #ifdef CONFIG_PPC64
1089 COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
1090 #else
1091 SYSCALL_DEFINE0(rt_sigreturn)
1092 #endif
1093 {
1094 struct rt_sigframe __user *rt_sf;
1095 struct pt_regs *regs = current_pt_regs();
1096 int tm_restore = 0;
1097 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1098 struct ucontext __user *uc_transact;
1099 unsigned long msr_hi;
1100 unsigned long tmp;
1101 #endif
1102
1103 current->restart_block.fn = do_no_restart_syscall;
1104
1105 rt_sf = (struct rt_sigframe __user *)
1106 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1107 if (!access_ok(rt_sf, sizeof(*rt_sf)))
1108 goto bad;
1109
1110 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121 if (MSR_TM_SUSPENDED(mfmsr()))
1122 tm_reclaim_current(0);
1123
1124 if (__get_user(tmp, &rt_sf->uc.uc_link))
1125 goto bad;
1126 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1127 if (uc_transact) {
1128 u32 cmcp;
1129 struct mcontext __user *mcp;
1130
1131 if (__get_user(cmcp, &uc_transact->uc_regs))
1132 return -EFAULT;
1133 mcp = (struct mcontext __user *)(u64)cmcp;
1134
1135
1136 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1137 goto bad;
1138
1139 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1140
1141 if (!cpu_has_feature(CPU_FTR_TM))
1142 goto bad;
1143
1144
1145
1146 tm_restore = 1;
1147 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1148 goto bad;
1149 }
1150 }
1151 if (!tm_restore) {
1152
1153
1154
1155
1156
1157 regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
1158 }
1159
1160 #endif
1161 if (!tm_restore)
1162 if (do_setcontext(&rt_sf->uc, regs, 1))
1163 goto bad;
1164
1165
1166
1167
1168
1169
1170
1171
1172 #ifdef CONFIG_PPC64
1173 if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1174 goto bad;
1175 #else
1176 if (restore_altstack(&rt_sf->uc.uc_stack))
1177 goto bad;
1178 #endif
1179 set_thread_flag(TIF_RESTOREALL);
1180 return 0;
1181
1182 bad:
1183 signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);
1184
1185 force_sig(SIGSEGV);
1186 return 0;
1187 }
1188
1189 #ifdef CONFIG_PPC32
1190 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
1191 int, ndbg, struct sig_dbg_op __user *, dbg)
1192 {
1193 struct pt_regs *regs = current_pt_regs();
1194 struct sig_dbg_op op;
1195 int i;
1196 unsigned long new_msr = regs->msr;
1197 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1198 unsigned long new_dbcr0 = current->thread.debug.dbcr0;
1199 #endif
1200
1201 for (i=0; i<ndbg; i++) {
1202 if (copy_from_user(&op, dbg + i, sizeof(op)))
1203 return -EFAULT;
1204 switch (op.dbg_type) {
1205 case SIG_DBG_SINGLE_STEPPING:
1206 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1207 if (op.dbg_value) {
1208 new_msr |= MSR_DE;
1209 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1210 } else {
1211 new_dbcr0 &= ~DBCR0_IC;
1212 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1213 current->thread.debug.dbcr1)) {
1214 new_msr &= ~MSR_DE;
1215 new_dbcr0 &= ~DBCR0_IDM;
1216 }
1217 }
1218 #else
1219 if (op.dbg_value)
1220 new_msr |= MSR_SE;
1221 else
1222 new_msr &= ~MSR_SE;
1223 #endif
1224 break;
1225 case SIG_DBG_BRANCH_TRACING:
1226 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1227 return -EINVAL;
1228 #else
1229 if (op.dbg_value)
1230 new_msr |= MSR_BE;
1231 else
1232 new_msr &= ~MSR_BE;
1233 #endif
1234 break;
1235
1236 default:
1237 return -EINVAL;
1238 }
1239 }
1240
1241
1242
1243
1244
1245
1246 regs_set_return_msr(regs, new_msr);
1247 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1248 current->thread.debug.dbcr0 = new_dbcr0;
1249 #endif
1250
1251 if (!access_ok(ctx, sizeof(*ctx)) ||
1252 fault_in_readable((char __user *)ctx, sizeof(*ctx)))
1253 return -EFAULT;
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266 if (do_setcontext(ctx, regs, 1)) {
1267 signal_fault(current, regs, "sys_debug_setcontext", ctx);
1268
1269 force_sig(SIGSEGV);
1270 goto out;
1271 }
1272
1273
1274
1275
1276
1277
1278
1279
1280 restore_altstack(&ctx->uc_stack);
1281
1282 set_thread_flag(TIF_RESTOREALL);
1283 out:
1284 return 0;
1285 }
1286 #endif
1287
1288
1289
1290
1291 #ifdef CONFIG_PPC64
1292 COMPAT_SYSCALL_DEFINE0(sigreturn)
1293 #else
1294 SYSCALL_DEFINE0(sigreturn)
1295 #endif
1296 {
1297 struct pt_regs *regs = current_pt_regs();
1298 struct sigframe __user *sf;
1299 struct sigcontext __user *sc;
1300 struct sigcontext sigctx;
1301 struct mcontext __user *sr;
1302 sigset_t set;
1303 struct mcontext __user *mcp;
1304 struct mcontext __user *tm_mcp = NULL;
1305 unsigned long long msr_hi = 0;
1306
1307
1308 current->restart_block.fn = do_no_restart_syscall;
1309
1310 sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1311 sc = &sf->sctx;
1312 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1313 goto badframe;
1314
1315 #ifdef CONFIG_PPC64
1316
1317
1318
1319
1320 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1321 #else
1322 set.sig[0] = sigctx.oldmask;
1323 set.sig[1] = sigctx._unused[3];
1324 #endif
1325 set_current_blocked(&set);
1326
1327 mcp = (struct mcontext __user *)&sf->mctx;
1328 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1329 tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1330 if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1331 goto badframe;
1332 #endif
1333 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1334 if (!cpu_has_feature(CPU_FTR_TM))
1335 goto badframe;
1336 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1337 goto badframe;
1338 } else {
1339 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1340 if (restore_user_regs(regs, sr, 1)) {
1341 signal_fault(current, regs, "sys_sigreturn", sr);
1342
1343 force_sig(SIGSEGV);
1344 return 0;
1345 }
1346 }
1347
1348 set_thread_flag(TIF_RESTOREALL);
1349 return 0;
1350
1351 badframe:
1352 signal_fault(current, regs, "sys_sigreturn", sc);
1353
1354 force_sig(SIGSEGV);
1355 return 0;
1356 }