#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include <asm/vdso.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
#include "entry.h"

typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	struct sigcontext32 sc;
	_sigregs32 sregs;
	int signo;
	_sigregs_ext32 sregs_ext;
	__u16 svc_insn;
} sigframe32;

typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	__u16 svc_insn;
	compat_siginfo_t info;
	struct ucontext32 uc;
} rt_sigframe32;

/* Store registers needed to create the signal frame */
static void store_sigregs(void)
{
	save_access_regs(current->thread.acrs);
	save_fpu_regs();
}

/* Load registers after signal return */
static void load_sigregs(void)
{
	restore_access_regs(current->thread.acrs);
}

static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
	_sigregs32 user_sregs;
	int i;

	user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
	user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
	user_sregs.regs.psw.mask |= PSW32_USER_BITS;
	user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
		(__u32)(regs->psw.mask & PSW_MASK_BA);
	for (i = 0; i < NUM_GPRS; i++)
		user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
	       sizeof(user_sregs.regs.acrs));
	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
		return -EFAULT;
	return 0;
}

static int restore_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
	_sigregs32 user_sregs;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
		return -EFAULT;

	if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
		return -EINVAL;

	/* Test the floating-point-control word. */
	if (test_fp_ctl(user_sregs.fpregs.fpc))
		return -EINVAL;

	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit */
	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
		(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
	/* Check for invalid user address space control. */
	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
		regs->psw.mask = PSW_ASC_PRIMARY |
			(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
	for (i = 0; i < NUM_GPRS; i++)
		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));
	fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);

	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
	return 0;
}

static int save_sigregs_ext32(struct pt_regs *regs,
			      _sigregs_ext32 __user *sregs_ext)
{
	__u32 gprs_high[NUM_GPRS];
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	/* Save high gprs to signal stack */
	for (i = 0; i < NUM_GPRS; i++)
		gprs_high[i] = regs->gprs[i] >> 32;
	if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
			   sizeof(sregs_ext->gprs_high)))
		return -EFAULT;

	/* Save vector registers to signal stack */
	if (MACHINE_HAS_VX) {
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
				   sizeof(sregs_ext->vxrs_low)) ||
		    __copy_to_user(&sregs_ext->vxrs_high,
				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
				   sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
	}
	return 0;
}

static int restore_sigregs_ext32(struct pt_regs *regs,
				 _sigregs_ext32 __user *sregs_ext)
{
	__u32 gprs_high[NUM_GPRS];
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	/* Restore high gprs from signal stack */
	if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
			     sizeof(sregs_ext->gprs_high)))
		return -EFAULT;
	for (i = 0; i < NUM_GPRS; i++)
		*(__u32 *)&regs->gprs[i] = gprs_high[i];

	/* Restore vector registers from signal stack */
	if (MACHINE_HAS_VX) {
		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
				     sizeof(sregs_ext->vxrs_low)) ||
		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
				     &sregs_ext->vxrs_high,
				     sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i];
	}
	return 0;
}

COMPAT_SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = task_pt_regs(current);
	sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
	sigset_t set;

	if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
		goto badframe;
	set_current_blocked(&set);
	save_fpu_regs();
	if (restore_sigregs32(regs, &frame->sregs))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
		goto badframe;
	load_sigregs();
	return regs->gprs[2];
badframe:
	force_sig(SIGSEGV);
	return 0;
}

COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = task_pt_regs(current);
	rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
	sigset_t set;

	if (get_compat_sigset(&set, &frame->uc.uc_sigmask))
		goto badframe;
	set_current_blocked(&set);
	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;
	save_fpu_regs();
	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
		goto badframe;
	load_sigregs();
	return regs->gprs[2];
badframe:
	force_sig(SIGSEGV);
	return 0;
}

/*
 * Determine which stack to use when setting up a new signal frame.
 */
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = (unsigned long) A(regs->gprs[15]);

	/* Overflow on alternate signal stack gives SIGSEGV. */
	if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
		return (void __user *) -1UL;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!sas_ss_flags(sp))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	return (void __user *)((sp - frame_size) & -8ul);
}

static int setup_frame32(struct ksignal *ksig, sigset_t *set,
			 struct pt_regs *regs)
{
	int sig = ksig->sig;
	sigframe32 __user *frame;
	unsigned long restorer;
	size_t frame_size;

	/*
	 * gprs_high are always present for 31-bit compat tasks.
	 * The space for vector registers is only allocated if
	 * the machine supports it
	 */
	frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
	if (!MACHINE_HAS_VX)
		frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
			      sizeof(frame->sregs_ext.vxrs_high);
	frame = get_sigframe(&ksig->ka, regs, frame_size);
	if (frame == (void __user *) -1UL)
		return -EFAULT;

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
		return -EFAULT;

	/* Create struct sigcontext32 on the signal stack */
	if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
			      set, sizeof(compat_sigset_t)))
		return -EFAULT;
	if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs))
		return -EFAULT;

	/* Store registers needed to create the signal frame */
	store_sigregs();

	/* Create _sigregs32 on the signal stack */
	if (save_sigregs32(regs, &frame->sregs))
		return -EFAULT;

	/* Place signal number on stack to allow backtrace from handler. */
	if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
		return -EFAULT;

	/* Create _sigregs_ext32 on the signal stack */
	if (save_sigregs_ext32(regs, &frame->sregs_ext))
		return -EFAULT;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = (unsigned long __force)
			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
	} else {
		restorer = VDSO32_SYMBOL(current, sigreturn);
	}

	/* Set up registers for signal handler */
	regs->gprs[14] = restorer;
	regs->gprs[15] = (__force __u64) frame;
	/* Force 31 bit amode and default user address space control. */
	regs->psw.mask = PSW_MASK_BA |
		(PSW_USER_BITS & PSW_MASK_ASC) |
		(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler;

	regs->gprs[2] = sig;
	regs->gprs[3] = (__force __u64) &frame->sc;

	/* Pass trap information only for synchronous signals, since it is
	   not part of the 31-bit sigcontext. */
	if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
	    sig == SIGTRAP || sig == SIGFPE) {
		regs->gprs[4] = regs->int_code & 127;
		regs->gprs[5] = regs->int_parm_long;
		regs->gprs[6] = current->thread.last_break;
	}

	return 0;
}

static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
			    struct pt_regs *regs)
{
	rt_sigframe32 __user *frame;
	unsigned long restorer;
	size_t frame_size;
	u32 uc_flags;

	frame_size = sizeof(*frame) -
		     sizeof(frame->uc.uc_mcontext_ext.__reserved);
	/*
	 * gprs_high are always present for 31-bit compat tasks.
	 * The space for vector registers is only allocated if
	 * the machine supports it
	 */
	uc_flags = UC_GPRS_HIGH;
	if (MACHINE_HAS_VX) {
		uc_flags |= UC_VXRS;
	} else
		frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
			      sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
	frame = get_sigframe(&ksig->ka, regs, frame_size);
	if (frame == (void __user *) -1UL)
		return -EFAULT;

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
		return -EFAULT;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = (unsigned long __force)
			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
	} else {
		restorer = VDSO32_SYMBOL(current, rt_sigreturn);
	}

	/* Create siginfo on the signal stack */
	if (copy_siginfo_to_user32(&frame->info, &ksig->info))
		return -EFAULT;

	/* Store registers needed to create the signal frame */
	store_sigregs();

	/* Create ucontext on the signal stack. */
	if (__put_user(uc_flags, &frame->uc.uc_flags) ||
	    __put_user(0, &frame->uc.uc_link) ||
	    __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
	    save_sigregs32(regs, &frame->uc.uc_mcontext) ||
	    put_compat_sigset(&frame->uc.uc_sigmask, set, sizeof(compat_sigset_t)) ||
	    save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->gprs[14] = restorer;
	regs->gprs[15] = (__force __u64) frame;
	/* Force 31 bit amode and default user address space control. */
	regs->psw.mask = PSW_MASK_BA |
		(PSW_USER_BITS & PSW_MASK_ASC) |
		(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler;

	regs->gprs[2] = ksig->sig;
	regs->gprs[3] = (__force __u64) &frame->info;
	regs->gprs[4] = (__force __u64) &frame->uc;
	regs->gprs[5] = current->thread.last_break;
	return 0;
}

/*
 * OK, we're invoking a handler
 */
void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		     struct pt_regs *regs)
{
	int ret;

	/* Set up the stack frame */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame32(ksig, oldset, regs);
	else
		ret = setup_frame32(ksig, oldset, regs);

	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP));
}