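/*
 * Signal frame setup and rt_sigreturn handling for 64-bit PowerPC
 * tasks, including the transactional memory (TM) variants.
 */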
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>

#include "signal.h"

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE	sizeof(elf_fpregset_t)

#define TRAMP_TRACEBACK	4
#define TRAMP_SIZE	7
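
/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer: first a rt_sigframe
 * struct (which contains the ucontext), then a gap of
 * __SIGNAL_FRAMESIZE bytes that acts as a dummy caller frame for
 * the signal trampoline.
 */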
struct rt_sigframe {
        /* sys_rt_sigreturn requires the ucontext be the first field */
        struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        struct ucontext uc_transact;
#endif
        unsigned long _unused[2];
        unsigned int tramp[TRAMP_SIZE];
        struct siginfo __user *pinfo;
        void __user *puc;
        struct siginfo info;
        /* Gap to honour the ABI red zone below the stack pointer */
        char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));

unsigned long get_min_sigframe_size_64(void)
{
        return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE;
}
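
/*
 * The VMX (Altivec) register save area is carved out of sc->vmx_reserve,
 * aligned to a 16-byte boundary.
 */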
#ifdef CONFIG_ALTIVEC
static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
{
        return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
}
#endif

static void prepare_setup_sigcontext(struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
        /* save altivec registers */
        if (tsk->thread.used_vr)
                flush_altivec_to_thread(tsk);
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif /* CONFIG_ALTIVEC */

        flush_fp_to_thread(tsk);

#ifdef CONFIG_VSX
        if (tsk->thread.used_vsr)
                flush_vsx_to_thread(tsk);
#endif /* CONFIG_VSX */
}
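
/*
 * Set up the sigcontext for the signal frame.
 */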
#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label) \
do { \
        if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region)) \
                goto label; \
} while (0)
static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
                                        struct task_struct *tsk, int signr, sigset_t *set,
                                        unsigned long handler, int ctx_has_vsx_region)
{
        /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
         * process never used altivec yet (MSR_VEC is zero in pt_regs of
         * the context). This is very important because we must ensure we
         * don't lose the VRSAVE content that may have been set prior to
         * the process doing its first vector operation.
         * Userland shall check AT_HWCAP to know whether it can rely on the
         * v_regs pointer or not.
         */
#ifdef CONFIG_ALTIVEC
        elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
#endif
        struct pt_regs *regs = tsk->thread.regs;
        unsigned long msr = regs->msr;
        /* Force userspace to always see softe as 1 (interrupts enabled) */
        unsigned long softe = 0x1;

        BUG_ON(tsk != current);

#ifdef CONFIG_ALTIVEC
        unsafe_put_user(v_regs, &sc->v_regs, efault_out);

        /* save altivec registers */
        if (tsk->thread.used_vr) {
                /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
                unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
                                    33 * sizeof(vector128), efault_out);
                /* set MSR_VEC in the MSR value in the frame to indicate
                 * that sc->v_regs contains valid data.
                 */
                msr |= MSR_VEC;
        }
        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec.
         */
        unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
#else /* CONFIG_ALTIVEC */
        unsafe_put_user(0, &sc->v_regs, efault_out);
#endif /* CONFIG_ALTIVEC */
        /* copy fpr regs and fpscr */
        unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out);

        /*
         * Clear the MSR VSX bit to indicate there is no valid state attached
         * to this context, except in the specific case below where we set it.
         */
        msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
        /*
         * Copy VSX low doubleword to local buffer for formatting,
         * then out to userspace.  Update v_regs to point after the
         * VMX data.
         */
        if (tsk->thread.used_vsr && ctx_has_vsx_region) {
                v_regs += ELF_NVRREG;
                unsafe_copy_vsx_to_user(v_regs, tsk, efault_out);
                /* set MSR_VSX in the MSR value in the frame to
                 * indicate that sc->vs_regs contains valid data.
                 */
                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */
        unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out);
        unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out);
        unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
        unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out);
        unsafe_put_user(signr, &sc->signal, efault_out);
        unsafe_put_user(handler, &sc->handler, efault_out);
        if (set != NULL)
                unsafe_put_user(set->sig[0], &sc->oldmask, efault_out);

        return 0;

efault_out:
        return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state.  A TM-aware handler can fetch the
 * transactional registers from the second sigcontext.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
                                 struct sigcontext __user *tm_sc,
                                 struct task_struct *tsk,
                                 int signr, sigset_t *set, unsigned long handler,
                                 unsigned long msr)
{
        /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
         * process never used altivec yet (MSR_VEC is zero in pt_regs of
         * the context). This is very important because we must ensure we
         * don't lose the VRSAVE content that may have been set prior to
         * the process doing its first vector operation.
         * Userland shall check AT_HWCAP to know whether it can rely on the
         * v_regs pointer or not.
         */
#ifdef CONFIG_ALTIVEC
        elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
        elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
        struct pt_regs *regs = tsk->thread.regs;
        long err = 0;

        BUG_ON(tsk != current);

        BUG_ON(!MSR_TM_ACTIVE(msr));

        WARN_ON(tm_suspend_disabled);

        /* Restore checkpointed FP, VEC, and VSX bits from ckpt_regs as
         * it contains the correct FP, VEC, VSX state after we treclaimed
         * the transaction and giveup_all() was called on reclaiming.
         */
        msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
        err |= __put_user(v_regs, &sc->v_regs);
        err |= __put_user(tm_v_regs, &tm_sc->v_regs);

        /* save altivec registers */
        if (tsk->thread.used_vr) {
                /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
                err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
                                      33 * sizeof(vector128));
                /* If VEC was enabled there are transactional VRs valid too,
                 * else they're a copy of the checkpointed VRs.
                 */
                if (msr & MSR_VEC)
                        err |= __copy_to_user(tm_v_regs,
                                              &tsk->thread.vr_state,
                                              33 * sizeof(vector128));
                else
                        err |= __copy_to_user(tm_v_regs,
                                              &tsk->thread.ckvr_state,
                                              33 * sizeof(vector128));

                /* set MSR_VEC in the MSR value in the frame to
                 * indicate that sc->v_regs contains valid data.
                 */
                msr |= MSR_VEC;
        }
        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec.
         */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
        err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
        if (msr & MSR_VEC)
                err |= __put_user(tsk->thread.vrsave,
                                  (u32 __user *)&tm_v_regs[33]);
        else
                err |= __put_user(tsk->thread.ckvrsave,
                                  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
        err |= __put_user(0, &sc->v_regs);
        err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

        /* copy fpr regs and fpscr */
        err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
        if (msr & MSR_FP)
                err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
        else
                err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);

#ifdef CONFIG_VSX
        /*
         * Copy VSX low doubleword to local buffer for formatting,
         * then out to userspace.  Update v_regs to point after the
         * VMX data.
         */
        if (tsk->thread.used_vsr) {
                v_regs += ELF_NVRREG;
                tm_v_regs += ELF_NVRREG;

                err |= copy_ckvsx_to_user(v_regs, tsk);

                if (msr & MSR_VSX)
                        err |= copy_vsx_to_user(tm_v_regs, tsk);
                else
                        err |= copy_ckvsx_to_user(tm_v_regs, tsk);

                /* set MSR_VSX in the MSR value in the frame to
                 * indicate that sc->vs_regs contains valid data.
                 */
                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */

        err |= __put_user(&sc->gp_regs, &sc->regs);
        err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
        err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
        err |= __copy_to_user(&sc->gp_regs,
                              &tsk->thread.ckpt_regs, GP_REGS_SIZE);
        err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
        err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
        err |= __put_user(signr, &sc->signal);
        err |= __put_user(handler, &sc->handler);
        if (set != NULL)
                err |= __put_user(set->sig[0], &sc->oldmask);

        return err;
}
#endif
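
/*
 * Restore the sigcontext from the signal frame.
 */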
#define unsafe_restore_sigcontext(tsk, set, sig, sc, label) do { \
        if (__unsafe_restore_sigcontext(tsk, set, sig, sc)) \
                goto label; \
} while (0)
static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set,
                                                int sig, struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
        elf_vrreg_t __user *v_regs;
#endif
        unsigned long save_r13 = 0;
        unsigned long msr;
        struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
        int i;
#endif

        BUG_ON(tsk != current);

        /* If this is not a signal return, we preserve the TLS in r13 */
        if (!sig)
                save_r13 = regs->gpr[13];

        /* copy the GPRs */
        unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), efault_out);
        unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out);
        /* get MSR separately, transfer the LE bit if doing signal return */
        unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
        if (sig)
                regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
        unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
        unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
        unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
        unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out);
        unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out);
        /* Don't let the signal frame restart the interrupted syscall */
        set_trap_norestart(regs);
        unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out);
        unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out);
        unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out);

        if (!sig)
                regs->gpr[13] = save_r13;
        if (set != NULL)
                unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);

        /*
         * Force reload of FP/VEC/VSX: clearing these MSR bits means the
         * register state will be reloaded from the thread_struct on the
         * way back to userspace, so the handler's changes are seen.
         */
        regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));

#ifdef CONFIG_ALTIVEC
        unsafe_get_user(v_regs, &sc->v_regs, efault_out);
        if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
        if (v_regs != NULL && (msr & MSR_VEC) != 0) {
                unsafe_copy_from_user(&tsk->thread.vr_state, v_regs,
                                      33 * sizeof(vector128), efault_out);
                tsk->thread.used_vr = true;
        } else if (tsk->thread.used_vr) {
                memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
        }
        /* Always get VRSAVE back */
        if (v_regs != NULL)
                unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
        else
                tsk->thread.vrsave = 0;
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
        /* restore floating point */
        unsafe_copy_fpr_from_user(tsk, &sc->fp_regs, efault_out);
#ifdef CONFIG_VSX
        /*
         * Get additional VSX data. Update v_regs to point after the
         * VMX data.  Copy VSX low doubleword from userspace to local
         * buffer for formatting, then into the task struct.
         */
        v_regs += ELF_NVRREG;
        if ((msr & MSR_VSX) != 0) {
                unsafe_copy_vsx_from_user(tsk, v_regs, efault_out);
                tsk->thread.used_vsr = true;
        } else {
                for (i = 0; i < 32; i++)
                        tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
        }
#endif
        return 0;

efault_out:
        return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */
static long restore_tm_sigcontexts(struct task_struct *tsk,
                                   struct sigcontext __user *sc,
                                   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
        elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
        unsigned long err = 0;
        unsigned long msr;
        struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
        int i;
#endif

        BUG_ON(tsk != current);

        if (tm_suspend_disabled)
                return -EINVAL;

        /* copy the GPRs */
        err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
        err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
                                sizeof(regs->gpr));

        /*
         * We resume execution at the transactional NIP; the checkpointed
         * NIP from the first context becomes TFHAR, the address the
         * transaction rolls back to on failure.
         */
        err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
        err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

        /* get MSR separately, transfer the LE bit if doing signal return */
        err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
        /* Don't allow reserved mode. */
        if (MSR_TM_RESV(msr))
                return -EINVAL;

        /* pull in MSR LE from user context */
        regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

        /* Restore the remaining checkpointed and transactional state */
        err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
        err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
        err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
        err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
        err |= __get_user(tsk->thread.ckpt_regs.ctr,
                          &sc->gp_regs[PT_CTR]);
        err |= __get_user(tsk->thread.ckpt_regs.link,
                          &sc->gp_regs[PT_LNK]);
        err |= __get_user(tsk->thread.ckpt_regs.xer,
                          &sc->gp_regs[PT_XER]);
        err |= __get_user(tsk->thread.ckpt_regs.ccr,
                          &sc->gp_regs[PT_CCR]);
        /* Don't let the signal frame restart the interrupted syscall */
        set_trap_norestart(regs);
        /* These regs are not checkpointed; they can only be modified here */
        err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
        err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
        err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

        /*
         * Force reload of FP/VEC/VSX so the state copied into the
         * thread_struct below is what userspace sees on return.
         */
        regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));

#ifdef CONFIG_ALTIVEC
        err |= __get_user(v_regs, &sc->v_regs);
        err |= __get_user(tm_v_regs, &tm_sc->v_regs);
        if (err)
                return err;
        if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
                return -EFAULT;
        if (tm_v_regs && !access_ok(tm_v_regs, 34 * sizeof(vector128)))
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
        if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
                err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
                                        33 * sizeof(vector128));
                err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
                                        33 * sizeof(vector128));
                current->thread.used_vr = true;
        } else if (tsk->thread.used_vr) {
                memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
                memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
        }
        /* Always get VRSAVE back */
        if (v_regs != NULL && tm_v_regs != NULL) {
                err |= __get_user(tsk->thread.ckvrsave,
                                  (u32 __user *)&v_regs[33]);
                err |= __get_user(tsk->thread.vrsave,
                                  (u32 __user *)&tm_v_regs[33]);
        } else {
                tsk->thread.vrsave = 0;
                tsk->thread.ckvrsave = 0;
        }
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
        /* restore floating point */
        err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
        err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
        /*
         * Get additional VSX data. Update v_regs to point after the
         * VMX data.  Copy VSX low doubleword from userspace to local
         * buffer for formatting, then into the task struct.
         */
        if (v_regs && ((msr & MSR_VSX) != 0)) {
                v_regs += ELF_NVRREG;
                tm_v_regs += ELF_NVRREG;
                err |= copy_vsx_from_user(tsk, tm_v_regs);
                err |= copy_ckvsx_from_user(tsk, v_regs);
                tsk->thread.used_vsr = true;
        } else {
                for (i = 0; i < 32; i++) {
                        tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
                        tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
                }
        }
#endif
        tm_enable();
        /* Make sure the transaction is marked as failed */
        tsk->thread.tm_texasr |= TEXASR_FS;

        /*
         * Disabling preemption, since it is unsafe to be preempted
         * with MSR[TS] set without recheckpointing.
         */
        preempt_disable();

        /* pull in MSR TS bits from user context */
        regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));

        /*
         * Ensure that TM is enabled in regs->msr before we leave the signal
         * handler. It could be the case that (a) user disabled the TM bit
         * through the manipulation of the MSR bits in uc_mcontext or (b) the
         * TM bit was disabled because a sufficient number of context switches
         * happened whilst in the signal handler and load_tm overflowed,
         * disabling the TM bit. In either case we can end up with an illegal
         * TM state leading to a TM Bad Thing when we return to userspace.
         *
         * CAUTION:
         * After regs->MSR[TS] is updated, make sure that get_user(),
         * put_user() or similar functions are *not* called. These
         * functions can generate page faults which will cause the process
         * to be de-scheduled with MSR[TS] set but without calling
         * tm_recheckpoint(). This can cause a bug.
         */
        regs_set_return_msr(regs, regs->msr | MSR_TM);

        /* This loads the checkpointed FP/VEC state, if used */
        tm_recheckpoint(&tsk->thread);

        msr_check_and_set(msr & (MSR_FP | MSR_VEC));
        if (msr & MSR_FP) {
                load_fp_state(&tsk->thread.fp_state);
                regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
        }
        if (msr & MSR_VEC) {
                load_vr_state(&tsk->thread.vr_state);
                regs_set_return_msr(regs, regs->msr | MSR_VEC);
        }

        preempt_enable();

        return err;
}
#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc,
                                   struct sigcontext __user *tm_sc)
{
        return -EINVAL;
}
#endif
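
/*
 * Setup the trampoline code on the stack
 */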
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
        int i;
        long err = 0;

        /* bctrl	# call the handler */
        err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
        /* addi	r1, r1, __SIGNAL_FRAMESIZE	# pop the dummy stackframe */
        err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);
        /* li	r0, __NR_rt_sigreturn */
        err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
        /* sc */
        err |= __put_user(PPC_RAW_SC(), &tramp[3]);

        /* Minimal traceback info */
        for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
                err |= __put_user(0, &tramp[i]);

        if (!err)
                flush_icache_range((unsigned long) &tramp[0],
                                   (unsigned long) &tramp[TRAMP_SIZE]);

        return err;
}
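
/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */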
#define UCONTEXTSIZEWITHOUTVSX \
                (sizeof(struct ucontext) - 32*sizeof(long))
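
/*
 * Handle {get,set,swap}_context operations
 */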
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                struct ucontext __user *, new_ctx, long, ctx_size)
{
        sigset_t set;
        unsigned long new_msr = 0;
        int ctx_has_vsx_region = 0;

        if (new_ctx &&
            get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
                return -EFAULT;
        /*
         * Check that the context is not smaller than the original
         * size (with VMX but without VSX)
         */
        if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
                return -EINVAL;
        /*
         * If the new context state sets the MSR VSX bits but
         * it doesn't provide VSX state.
         */
        if ((ctx_size < sizeof(struct ucontext)) &&
            (new_msr & MSR_VSX))
                return -EINVAL;
        /* Does the context have enough room to store VSX data? */
        if (ctx_size >= sizeof(struct ucontext))
                ctx_has_vsx_region = 1;

        if (old_ctx != NULL) {
                prepare_setup_sigcontext(current);
                if (!user_write_access_begin(old_ctx, ctx_size))
                        return -EFAULT;

                unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL,
                                        0, ctx_has_vsx_region, efault_out);
                unsafe_copy_to_user(&old_ctx->uc_sigmask, &current->blocked,
                                    sizeof(sigset_t), efault_out);

                user_write_access_end();
        }
        if (new_ctx == NULL)
                return 0;
        if (!access_ok(new_ctx, ctx_size) ||
            fault_in_readable((char __user *)new_ctx, ctx_size))
                return -EFAULT;

        /*
         * If we get a fault copying the context into the kernel's
         * image of the user's registers, we can't just return -EFAULT
         * because the user's registers will be corrupted.  For instance
         * the NIP value may have been updated but not some of the
         * other registers.  Given that we have done the access_ok
         * and successfully read the first and last bytes of the region
         * above, this should only happen in an out-of-memory situation
         * or if another thread unmaps the region containing the context.
         * We kill the task with a SIGSEGV in this situation.
         */
        if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
                force_exit_sig(SIGSEGV);
                return -EFAULT;
        }
        set_current_blocked(&set);

        if (!user_read_access_begin(new_ctx, ctx_size))
                return -EFAULT;
        if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
                user_read_access_end();
                force_exit_sig(SIGSEGV);
                return -EFAULT;
        }
        user_read_access_end();

        /* This returns like rt_sigreturn */
        set_thread_flag(TIF_RESTOREALL);

        return 0;

efault_out:
        user_write_access_end();
        return -EFAULT;
}
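
/*
 * Do a signal return; undo the signal stack.
 */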
SYSCALL_DEFINE0(rt_sigreturn)
{
        struct pt_regs *regs = current_pt_regs();
        struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
        sigset_t set;
        unsigned long msr;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        if (!access_ok(uc, sizeof(*uc)))
                goto badframe;

        if (__get_user_sigset(&set, &uc->uc_sigmask))
                goto badframe;
        set_current_blocked(&set);

        if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) {
                /*
                 * If there is a transactional state then throw it away.
                 * The purpose of a sigreturn is to destroy all traces of
                 * the signal frame, and that includes any transactional
                 * state created within it. We only check for suspended,
                 * as we can never be transactionally active in the kernel;
                 * if we somehow were, there is nothing better to do than
                 * take the Bad Thing later. The cause is not important,
                 * as there will never be a recheckpoint, so it is not
                 * user visible.
                 */
                if (MSR_TM_SUSPENDED(mfmsr()))
                        tm_reclaim_current(0);

                /*
                 * Disable the MSR[TS] bits as well, so that an exception
                 * in the code below (such as a page fault while copying
                 * in the context) cannot recheckpoint this task if a
                 * context switch happens inside the exception: a major
                 * page fault can indirectly call schedule(), and a
                 * recheckpoint with stale MSR[TS] would leave the CPU in
                 * an illegal TM state on the way back to userspace.
                 */
                regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);

                if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
                        goto badframe;
        }

        if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) {
                /* We recheckpoint on return. */
                struct ucontext __user *uc_transact;

                /* Trying to start TM on non TM system */
                if (!cpu_has_feature(CPU_FTR_TM))
                        goto badframe;

                if (__get_user(uc_transact, &uc->uc_link))
                        goto badframe;
                if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
                                           &uc_transact->uc_mcontext))
                        goto badframe;
        } else {
                /*
                 * Fall through, for non-TM restore
                 *
                 * Unset MSR[TS] on the thread regs since MSR from user
                 * context does not have MSR active, and recheckpoint was
                 * not called since restore_tm_sigcontexts() was not called
                 * also.
                 *
                 * If not unsetting it, the code can RFID to userspace with
                 * MSR[TS] set, but without CPU in the proper state,
                 * causing a TM bad thing.
                 */
                regs_set_return_msr(current->thread.regs,
                                    current->thread.regs->msr & ~MSR_TS_MASK);
                if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
                        goto badframe;

                unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext,
                                          badframe_block);

                user_read_access_end();
        }

        if (restore_altstack(&uc->uc_stack))
                goto badframe;

        set_thread_flag(TIF_RESTOREALL);

        return 0;

badframe_block:
        user_read_access_end();
badframe:
        signal_fault(current, regs, "rt_sigreturn", uc);

        force_sig(SIGSEGV);
        return 0;
}
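
/*
 * Set up an rt signal frame on the user stack and arrange for the
 * task to enter its handler on return to userspace.
 */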
int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
                       struct task_struct *tsk)
{
        struct rt_sigframe __user *frame;
        unsigned long newsp = 0;
        long err = 0;
        struct pt_regs *regs = tsk->thread.regs;
        /* Save the thread's msr before get_tm_stackpointer() changes it */
        unsigned long msr = regs->msr;

        frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);

        /*
         * This only applies when calling unsafe_setup_sigcontext() and must be
         * called before opening the uaccess window.
         */
        if (!MSR_TM_ACTIVE(msr))
                prepare_setup_sigcontext(tsk);

        if (!user_write_access_begin(frame, sizeof(*frame)))
                goto badframe;

        unsafe_put_user(&frame->info, &frame->pinfo, badframe_block);
        unsafe_put_user(&frame->uc, &frame->puc, badframe_block);

        /* Create the ucontext.  */
        unsafe_put_user(0, &frame->uc.uc_flags, badframe_block);
        unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block);

        if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
                /* The ucontext_t passed to userland points to the second
                 * ucontext_t (for transactional state) with its uc_link ptr.
                 */
                unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block);

                user_write_access_end();

                err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
                                            &frame->uc_transact.uc_mcontext,
                                            tsk, ksig->sig, NULL,
                                            (unsigned long)ksig->ka.sa.sa_handler,
                                            msr);

                if (!user_write_access_begin(&frame->uc.uc_sigmask,
                                             sizeof(frame->uc.uc_sigmask)))
                        goto badframe;

#endif
        } else {
                unsafe_put_user(0, &frame->uc.uc_link, badframe_block);
                unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
                                        NULL, (unsigned long)ksig->ka.sa.sa_handler,
                                        1, badframe_block);
        }

        unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
        user_write_access_end();

        /* Save the siginfo outside of the uaccess window */
        if (copy_siginfo_to_user(&frame->info, &ksig->info))
                goto badframe;

        /* Make sure signal handler doesn't get spurious FP exceptions */
        tsk->thread.fp_state.fpscr = 0;

        /* Set up to return from userspace. */
        if (tsk->mm->context.vdso) {
                regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
        } else {
                err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
                if (err)
                        goto badframe;
                regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
        }

        /* Allocate a dummy caller frame for the signal handler. */
        newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
        err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

        /* Set up "regs" so we "return" to the signal handler. */
        if (is_elf2_task()) {
                /* ELFv2: the handler address is the entry point itself;
                 * r12 must hold it for the global entry point code.
                 */
                regs->ctr = (unsigned long) ksig->ka.sa.sa_handler;
                regs->gpr[12] = regs->ctr;
        } else {
                /* Handler is *really* a pointer to the function descriptor for
                 * the signal routine.  The first entry in the function
                 * descriptor is the entry address of signal and the second
                 * entry is the TOC value we need to use.
                 */
                struct func_desc __user *ptr =
                        (struct func_desc __user *)ksig->ka.sa.sa_handler;

                err |= get_user(regs->ctr, &ptr->addr);
                err |= get_user(regs->gpr[2], &ptr->toc);
        }

        /* enter the signal handler in native-endian mode */
        regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
        regs->gpr[1] = newsp;
        regs->gpr[3] = ksig->sig;
        regs->result = 0;
        if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
                regs->gpr[4] = (unsigned long)&frame->info;
                regs->gpr[5] = (unsigned long)&frame->uc;
                regs->gpr[6] = (unsigned long) frame;
        } else {
                regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
        }
        if (err)
                goto badframe;

        return 0;

badframe_block:
        user_write_access_end();
badframe:
        signal_fault(current, regs, "handle_rt_signal64", frame);

        return 1;
}