// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals
	 * unless it is SIGKILL, which can't be reported anyway.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up
 * so it notices the new pending state.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

/* Given the mask, find the first available signal that should be serviced. */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
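
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared first.  If @task is already dying or exiting, this becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */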
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
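
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */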
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
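
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */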
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
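
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */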
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
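
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */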
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
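
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */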
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
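
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */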
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case.  We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
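
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */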
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
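
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */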
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued.  Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
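
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */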
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};
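
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */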
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
		       enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a thread group or to the
 * individual thread if type == PIDTYPE_PID.
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
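
/*
 * kill_pid_usb_asyncio - send a signal generated for USB asynchronous I/O
 *
 * Builds an SI_ASYNCIO siginfo whose si_pid/si_uid fields are overlaid
 * with the sigval @addr, performs the permission check against @cred
 * rather than the current task's credentials, and delivers the signal
 * to the thread group of the task identified by @pid.
 */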
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures where include/uapi/asm/siginfo.h
 * defines struct siginfo with a si_trapno member.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures where include/uapi/asm/siginfo.h
 * defines struct siginfo with a si_trapno member.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
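
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation succeeds the timer_settime calls cannot fail.
 */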
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
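
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */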
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	WARN_ON_ONCE(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));

	WARN_ON_ONCE(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal_locked as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
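
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */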
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
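
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * Returns the signal the ptracer requested the code resume with.
 */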
static int ptrace_stop(int exit_code, int why, unsigned long message,
		       kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed()) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop();
		spin_lock_irq(&current->sighand->siglock);
	}

	/*
	 * After this point ptrace_signal_wake_up or signal_wake_up
	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
	 * signal comes in.  Handle previous ptrace_unlinks and fatal
	 * signals here to prevent ptrace_stop sleeping in schedule.
	 */
	if (!current->ptrace || __fatal_signal_pending(current))
		return exit_code;

	set_special_state(TASK_TRACED);
	current->jobctl |= JOBCTL_TRACED;

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	smp_wmb();

	current->ptrace_message = message;
	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock releases while we process the stop, TRACED will be
	 * cleared and we'll resume.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);

	/*
	 * Notify parents of the stop.
	 *
	 * While ptraced, there are two parents - the ptracer and
	 * the real_parent of the group_leader.  The ptracer should
	 * know about every stop while the real parent is only
	 * interested in the completion of group stop.  The states
	 * for the two don't interact with each other.  Notify
	 * separately unless they're gonna be duplicates.
	 */
	if (current->ptrace)
		do_notify_parent_cldstop(current, true, why);
	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
		do_notify_parent_cldstop(current, false, why);

	/*
	 * Don't want to allow preemption here, because
	 * sys_ptrace() needs this task to be inactive.
	 *
	 * XXX: implement read_unlock_no_resched().
	 */
	preempt_disable();
	read_unlock(&tasklist_lock);
	cgroup_enter_frozen();
	preempt_enable_no_resched();
	freezable_schedule();
	cgroup_leave_frozen(true);

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	exit_code = current->exit_code;
	current->last_siginfo = NULL;
	current->ptrace_message = 0;
	current->exit_code = 0;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
	return exit_code;
}

static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	return ptrace_stop(exit_code, why, message, &info);
}

int ptrace_notify(int exit_code, unsigned long message)
{
	int signr;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(task_work_pending(current)))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
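
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * places afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */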
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
		    unlikely(sig->group_exec_task))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
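
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */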
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED, 0);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, if only the task is not about to quit.
 * In this case it drops JOBCTL_TRAP_FREEZE.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps.  Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	freezable_schedule();
}

static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr.  This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop().  In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping.  See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	if (signr == 0)
		return signr;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr) ||
	    fatal_signal_pending(current)) {
		send_signal_locked(signr, info, current, type);
		signr = 0;
	}

	return signr;
}

static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
		ksig->info.si_addr = arch_untagged_si_addr(
			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
		break;
	case SIL_KILL:
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_CHLD:
	case SIL_RT:
	case SIL_SYS:
		break;
	}
}
2625
2626 bool get_signal(struct ksignal *ksig)
2627 {
2628 struct sighand_struct *sighand = current->sighand;
2629 struct signal_struct *signal = current->signal;
2630 int signr;
2631
2632 clear_notify_signal();
2633 if (unlikely(task_work_pending(current)))
2634 task_work_run();
2635
2636 if (!task_sigpending(current))
2637 return false;
2638
2639 if (unlikely(uprobe_deny_signal()))
2640 return false;
2641
2642
2643
2644
2645
2646
2647 try_to_freeze();
2648
2649 relock:
2650 spin_lock_irq(&sighand->siglock);
2651
2652
2653
2654
2655
2656
2657 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2658 int why;
2659
2660 if (signal->flags & SIGNAL_CLD_CONTINUED)
2661 why = CLD_CONTINUED;
2662 else
2663 why = CLD_STOPPED;
2664
2665 signal->flags &= ~SIGNAL_CLD_MASK;
2666
2667 spin_unlock_irq(&sighand->siglock);
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677 read_lock(&tasklist_lock);
2678 do_notify_parent_cldstop(current, false, why);
2679
2680 if (ptrace_reparented(current->group_leader))
2681 do_notify_parent_cldstop(current->group_leader,
2682 true, why);
2683 read_unlock(&tasklist_lock);
2684
2685 goto relock;
2686 }
2687
2688 for (;;) {
2689 struct k_sigaction *ka;
2690 enum pid_type type;
2691
2692
2693 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2694 signal->group_exec_task) {
2695 ksig->info.si_signo = signr = SIGKILL;
2696 sigdelset(¤t->pending.signal, SIGKILL);
2697 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2698 &sighand->action[SIGKILL - 1]);
2699 recalc_sigpending();
2700 goto fatal;
2701 }
2702
2703 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2704 do_signal_stop(0))
2705 goto relock;
2706
2707 if (unlikely(current->jobctl &
2708 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2709 if (current->jobctl & JOBCTL_TRAP_MASK) {
2710 do_jobctl_trap();
2711 spin_unlock_irq(&sighand->siglock);
2712 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2713 do_freezer_trap();
2714
2715 goto relock;
2716 }
2717
2718
2719
2720
2721
2722 if (unlikely(cgroup_task_frozen(current))) {
2723 spin_unlock_irq(&sighand->siglock);
2724 cgroup_leave_frozen(false);
2725 goto relock;
2726 }
2727
2728
2729
2730
2731
2732
2733
2734 type = PIDTYPE_PID;
2735 signr = dequeue_synchronous_signal(&ksig->info);
2736 if (!signr)
2737 signr = dequeue_signal(current, ¤t->blocked,
2738 &ksig->info, &type);
2739
2740 if (!signr)
2741 break;
2742
2743 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2744 !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2745 signr = ptrace_signal(signr, &ksig->info, type);
2746 if (!signr)
2747 continue;
2748 }
2749
2750 ka = &sighand->action[signr-1];
2751
2752
2753 trace_signal_deliver(signr, &ksig->info, ka);
2754
2755 if (ka->sa.sa_handler == SIG_IGN)
2756 continue;
2757 if (ka->sa.sa_handler != SIG_DFL) {
2758
2759 ksig->ka = *ka;
2760
2761 if (ka->sa.sa_flags & SA_ONESHOT)
2762 ka->sa.sa_handler = SIG_DFL;
2763
2764 break;
2765 }
2766
2767
2768
2769
2770 if (sig_kernel_ignore(signr))
2771 continue;
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
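/*
 * Global init gets no signals it doesn't want.
 * Container-init gets no signals it doesn't want from same
 * container.
 *
 * Note that if global/container-init sees a sig_kernel_only()
 * signal here, the signal must have been generated internally
 * or must have come from an ancestor namespace. In either
 * case, the signal cannot be dropped.
 */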
2783 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2784 !sig_kernel_only(signr))
2785 continue;
2786
2787 if (sig_kernel_stop(signr)) {
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
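/*
 * The default action is to stop all threads in
 * the thread group. The job control signals
 * do nothing in an orphaned pgrp, but SIGSTOP
 * always works. Note that siglock needs to be
 * dropped during the call to is_current_pgrp_orphaned()
 * because of lock ordering with tasklist_lock.
 * This allows an intervening SIGCONT to be posted.
 * We need to check for that and bail out if necessary.
 */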
2798 if (signr != SIGSTOP) {
2799 spin_unlock_irq(&sighand->siglock);
2800
2801
2802
2803 if (is_current_pgrp_orphaned())
2804 goto relock;
2805
2806 spin_lock_irq(&sighand->siglock);
2807 }
2808
2809 if (likely(do_signal_stop(ksig->info.si_signo))) {
2810
2811 goto relock;
2812 }
2813
2814
2815
2816
2817
2818 continue;
2819 }
2820
2821 fatal:
2822 spin_unlock_irq(&sighand->siglock);
2823 if (unlikely(cgroup_task_frozen(current)))
2824 cgroup_leave_frozen(true);
2825
2826
2827
2828
2829 current->flags |= PF_SIGNALED;
2830
2831 if (sig_kernel_coredump(signr)) {
2832 if (print_fatal_signals)
2833 print_fatal_signal(ksig->info.si_signo);
2834 proc_coredump_connector(current);
2835
2836
2837
2838
2839
2840
2841
2842
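/*
 * If it was able to dump core, this kills all
 * other threads in the group and synchronizes with
 * their demise. If we lost the race with another
 * thread getting here, it set group_exit_code
 * first and our do_group_exit call below will use
 * that value and ignore the one we pass it.
 */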
2843 do_coredump(&ksig->info);
2844 }
2845
2846
2847
2848
2849
2850
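/*
 * PF_IO_WORKER threads will catch and exit on fatal signals
 * themselves. They have cleanup that must be performed, so
 * we cannot call do_exit() on their behalf.
 */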
2851 if (current->flags & PF_IO_WORKER)
2852 goto out;
2853
2854
2855
2856
2857 do_group_exit(ksig->info.si_signo);
2858
2859 }
2860 spin_unlock_irq(&sighand->siglock);
2861 out:
2862 ksig->sig = signr;
2863
2864 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2865 hide_si_addr_tag_bits(ksig);
2866
2867 return ksig->sig > 0;
2868 }
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
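/**
 * signal_delivered - called after signal delivery to update blocked signals
 * @ksig:	kernel signal struct
 * @stepping:	nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
 */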
2880 static void signal_delivered(struct ksignal *ksig, int stepping)
2881 {
2882 sigset_t blocked;
2883
2884
2885
2886
2887
2888 clear_restore_sigmask();
2889
2890 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2891 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2892 sigaddset(&blocked, ksig->sig);
2893 set_current_blocked(&blocked);
2894 if (current->sas_ss_flags & SS_AUTODISARM)
2895 sas_ss_reset(current);
2896 if (stepping)
2897 ptrace_notify(SIGTRAP, 0);
2898 }
2899
2900 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2901 {
2902 if (failed)
2903 force_sigsegv(ksig->sig);
2904 else
2905 signal_delivered(ksig, stepping);
2906 }
2907
2908
2909
2910
2911
2912
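/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */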
2913 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2914 {
2915 sigset_t retarget;
2916 struct task_struct *t;
2917
2918 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2919 if (sigisemptyset(&retarget))
2920 return;
2921
2922 t = tsk;
2923 while_each_thread(tsk, t) {
2924 if (t->flags & PF_EXITING)
2925 continue;
2926
2927 if (!has_pending_signals(&retarget, &t->blocked))
2928 continue;
2929
2930 sigandsets(&retarget, &retarget, &t->blocked);
2931
2932 if (!task_sigpending(t))
2933 signal_wake_up(t, 0);
2934
2935 if (sigisemptyset(&retarget))
2936 break;
2937 }
2938 }
2939
2940 void exit_signals(struct task_struct *tsk)
2941 {
2942 int group_stop = 0;
2943 sigset_t unblocked;
2944
2945
2946
2947
2948
2949 cgroup_threadgroup_change_begin(tsk);
2950
2951 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2952 tsk->flags |= PF_EXITING;
2953 cgroup_threadgroup_change_end(tsk);
2954 return;
2955 }
2956
2957 spin_lock_irq(&tsk->sighand->siglock);
2958
2959
2960
2961
2962 tsk->flags |= PF_EXITING;
2963
2964 cgroup_threadgroup_change_end(tsk);
2965
2966 if (!task_sigpending(tsk))
2967 goto out;
2968
2969 unblocked = tsk->blocked;
2970 signotset(&unblocked);
2971 retarget_shared_pending(tsk, &unblocked);
2972
2973 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2974 task_participate_group_stop(tsk))
2975 group_stop = CLD_STOPPED;
2976 out:
2977 spin_unlock_irq(&tsk->sighand->siglock);
2978
2979
2980
2981
2982
2983 if (unlikely(group_stop)) {
2984 read_lock(&tasklist_lock);
2985 do_notify_parent_cldstop(tsk, false, group_stop);
2986 read_unlock(&tasklist_lock);
2987 }
2988 }
2989
2990
2991
2992
2993
2994
2995
2996
2997 SYSCALL_DEFINE0(restart_syscall)
2998 {
2999 struct restart_block *restart = &current->restart_block;
3000 return restart->fn(restart);
3001 }
3002
3003 long do_no_restart_syscall(struct restart_block *param)
3004 {
3005 return -EINTR;
3006 }
3007
3008 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3009 {
3010 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3011 sigset_t newblocked;
3012
3013 sigandnsets(&newblocked, newset, &current->blocked);
3014 retarget_shared_pending(tsk, &newblocked);
3015 }
3016 tsk->blocked = *newset;
3017 recalc_sigpending();
3018 }
3019
3020
3021
3022
3023
3024
3025
3026
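/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */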
3027 void set_current_blocked(sigset_t *newset)
3028 {
3029 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3030 __set_current_blocked(newset);
3031 }
3032
3033 void __set_current_blocked(const sigset_t *newset)
3034 {
3035 struct task_struct *tsk = current;
3036
3037
3038
3039
3040
3041 if (sigequalsets(&tsk->blocked, newset))
3042 return;
3043
3044 spin_lock_irq(&tsk->sighand->siglock);
3045 __set_task_blocked(tsk, newset);
3046 spin_unlock_irq(&tsk->sighand->siglock);
3047 }
3048
3049
3050
3051
3052
3053
3054
3055
3056
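/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 *
 * A minimal usage sketch, e.g. blocking SIGHUP around a critical
 * region in kernel code:
 *
 *	sigset_t set, old;
 *	siginitset(&set, sigmask(SIGHUP));
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */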
3057 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3058 {
3059 struct task_struct *tsk = current;
3060 sigset_t newset;
3061
3062
3063 if (oldset)
3064 *oldset = tsk->blocked;
3065
3066 switch (how) {
3067 case SIG_BLOCK:
3068 sigorsets(&newset, &tsk->blocked, set);
3069 break;
3070 case SIG_UNBLOCK:
3071 sigandnsets(&newset, &tsk->blocked, set);
3072 break;
3073 case SIG_SETMASK:
3074 newset = *set;
3075 break;
3076 default:
3077 return -EINVAL;
3078 }
3079
3080 __set_current_blocked(&newset);
3081 return 0;
3082 }
3083 EXPORT_SYMBOL(sigprocmask);
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
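/*
 * The api helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 *
 * Note that it does set_restore_sigmask() in advance, so it must be always
 * paired with restore_saved_sigmask_unless() before return from syscall.
 */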
3094 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3095 {
3096 sigset_t kmask;
3097
3098 if (!umask)
3099 return 0;
3100 if (sigsetsize != sizeof(sigset_t))
3101 return -EINVAL;
3102 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3103 return -EFAULT;
3104
3105 set_restore_sigmask();
3106 current->saved_sigmask = current->blocked;
3107 set_current_blocked(&kmask);
3108
3109 return 0;
3110 }
3111
3112 #ifdef CONFIG_COMPAT
3113 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3114 size_t sigsetsize)
3115 {
3116 sigset_t kmask;
3117
3118 if (!umask)
3119 return 0;
3120 if (sigsetsize != sizeof(compat_sigset_t))
3121 return -EINVAL;
3122 if (get_compat_sigset(&kmask, umask))
3123 return -EFAULT;
3124
3125 set_restore_sigmask();
3126 current->saved_sigmask = current->blocked;
3127 set_current_blocked(&kmask);
3128
3129 return 0;
3130 }
3131 #endif
3132
3133
3134
3135
3136
3137
3138
3139
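/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: stores pending signals
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */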
3140 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3141 sigset_t __user *, oset, size_t, sigsetsize)
3142 {
3143 sigset_t old_set, new_set;
3144 int error;
3145
3146
3147 if (sigsetsize != sizeof(sigset_t))
3148 return -EINVAL;
3149
3150 old_set = current->blocked;
3151
3152 if (nset) {
3153 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3154 return -EFAULT;
3155 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3156
3157 error = sigprocmask(how, &new_set, NULL);
3158 if (error)
3159 return error;
3160 }
3161
3162 if (oset) {
3163 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3164 return -EFAULT;
3165 }
3166
3167 return 0;
3168 }
3169
3170 #ifdef CONFIG_COMPAT
3171 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3172 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3173 {
3174 sigset_t old_set = current->blocked;
3175
3176
3177 if (sigsetsize != sizeof(sigset_t))
3178 return -EINVAL;
3179
3180 if (nset) {
3181 sigset_t new_set;
3182 int error;
3183 if (get_compat_sigset(&new_set, nset))
3184 return -EFAULT;
3185 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3186
3187 error = sigprocmask(how, &new_set, NULL);
3188 if (error)
3189 return error;
3190 }
3191 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3192 }
3193 #endif
3194
3195 static void do_sigpending(sigset_t *set)
3196 {
3197 spin_lock_irq(&current->sighand->siglock);
3198 sigorsets(set, &current->pending.signal,
3199 &current->signal->shared_pending.signal);
3200 spin_unlock_irq(&current->sighand->siglock);
3201
3202 /* Outside the lock because only this thread touches it. */
3203 sigandsets(set, &current->blocked, set);
3204 }
3205
3206
3207
3208
3209
3210
3211
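/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */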
3212 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3213 {
3214 sigset_t set;
3215
3216 if (sigsetsize > sizeof(*uset))
3217 return -EINVAL;
3218
3219 do_sigpending(&set);
3220
3221 if (copy_to_user(uset, &set, sigsetsize))
3222 return -EFAULT;
3223
3224 return 0;
3225 }
3226
3227 #ifdef CONFIG_COMPAT
3228 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3229 compat_size_t, sigsetsize)
3230 {
3231 sigset_t set;
3232
3233 if (sigsetsize > sizeof(*uset))
3234 return -EINVAL;
3235
3236 do_sigpending(&set);
3237
3238 return put_compat_sigset(uset, &set, sigsetsize);
3239 }
3240 #endif
3241
3242 static const struct {
3243 unsigned char limit, layout;
3244 } sig_sicodes[] = {
3245 [SIGILL] = { NSIGILL, SIL_FAULT },
3246 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3247 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3248 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3249 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3250 #if defined(SIGEMT)
3251 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3252 #endif
3253 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3254 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3255 [SIGSYS] = { NSIGSYS, SIL_SYS },
3256 };
3257
3258 static bool known_siginfo_layout(unsigned sig, int si_code)
3259 {
3260 if (si_code == SI_KERNEL)
3261 return true;
3262 else if (si_code > SI_USER) {
3263 if (sig_specific_sicodes(sig)) {
3264 if (si_code <= sig_sicodes[sig].limit)
3265 return true;
3266 }
3267 else if (si_code <= NSIGPOLL)
3268 return true;
3269 }
3270 else if (si_code >= SI_DETHREAD)
3271 return true;
3272 else if (si_code == SI_ASYNCNL)
3273 return true;
3274 return false;
3275 }
3276
3277 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3278 {
3279 enum siginfo_layout layout = SIL_KILL;
3280 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3281 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3282 (si_code <= sig_sicodes[sig].limit)) {
3283 layout = sig_sicodes[sig].layout;
3284
3285 if ((sig == SIGBUS) &&
3286 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3287 layout = SIL_FAULT_MCEERR;
3288 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3289 layout = SIL_FAULT_BNDERR;
3290 #ifdef SEGV_PKUERR
3291 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3292 layout = SIL_FAULT_PKUERR;
3293 #endif
3294 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3295 layout = SIL_FAULT_PERF_EVENT;
3296 else if (IS_ENABLED(CONFIG_SPARC) &&
3297 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3298 layout = SIL_FAULT_TRAPNO;
3299 else if (IS_ENABLED(CONFIG_ALPHA) &&
3300 ((sig == SIGFPE) ||
3301 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3302 layout = SIL_FAULT_TRAPNO;
3303 }
3304 else if (si_code <= NSIGPOLL)
3305 layout = SIL_POLL;
3306 } else {
3307 if (si_code == SI_TIMER)
3308 layout = SIL_TIMER;
3309 else if (si_code == SI_SIGIO)
3310 layout = SIL_POLL;
3311 else if (si_code < 0)
3312 layout = SIL_RT;
3313 }
3314 return layout;
3315 }
3316
3317 static inline char __user *si_expansion(const siginfo_t __user *info)
3318 {
3319 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3320 }
3321
3322 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3323 {
3324 char __user *expansion = si_expansion(to);
3325 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3326 return -EFAULT;
3327 if (clear_user(expansion, SI_EXPANSION_SIZE))
3328 return -EFAULT;
3329 return 0;
3330 }
3331
3332 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3333 const siginfo_t __user *from)
3334 {
3335 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3336 char __user *expansion = si_expansion(from);
3337 char buf[SI_EXPANSION_SIZE];
3338 int i;
3339
3340
3341
3342
3343
3344
3345 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3346 return -EFAULT;
3347 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3348 if (buf[i] != 0)
3349 return -E2BIG;
3350 }
3351 }
3352 return 0;
3353 }
3354
3355 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3356 const siginfo_t __user *from)
3357 {
3358 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3359 return -EFAULT;
3360 to->si_signo = signo;
3361 return post_copy_siginfo_from_user(to, from);
3362 }
3363
3364 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3365 {
3366 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3367 return -EFAULT;
3368 return post_copy_siginfo_from_user(to, from);
3369 }
3370
3371 #ifdef CONFIG_COMPAT
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382 void copy_siginfo_to_external32(struct compat_siginfo *to,
3383 const struct kernel_siginfo *from)
3384 {
3385 memset(to, 0, sizeof(*to));
3386
3387 to->si_signo = from->si_signo;
3388 to->si_errno = from->si_errno;
3389 to->si_code = from->si_code;
3390 switch (siginfo_layout(from->si_signo, from->si_code)) {
3391 case SIL_KILL:
3392 to->si_pid = from->si_pid;
3393 to->si_uid = from->si_uid;
3394 break;
3395 case SIL_TIMER:
3396 to->si_tid = from->si_tid;
3397 to->si_overrun = from->si_overrun;
3398 to->si_int = from->si_int;
3399 break;
3400 case SIL_POLL:
3401 to->si_band = from->si_band;
3402 to->si_fd = from->si_fd;
3403 break;
3404 case SIL_FAULT:
3405 to->si_addr = ptr_to_compat(from->si_addr);
3406 break;
3407 case SIL_FAULT_TRAPNO:
3408 to->si_addr = ptr_to_compat(from->si_addr);
3409 to->si_trapno = from->si_trapno;
3410 break;
3411 case SIL_FAULT_MCEERR:
3412 to->si_addr = ptr_to_compat(from->si_addr);
3413 to->si_addr_lsb = from->si_addr_lsb;
3414 break;
3415 case SIL_FAULT_BNDERR:
3416 to->si_addr = ptr_to_compat(from->si_addr);
3417 to->si_lower = ptr_to_compat(from->si_lower);
3418 to->si_upper = ptr_to_compat(from->si_upper);
3419 break;
3420 case SIL_FAULT_PKUERR:
3421 to->si_addr = ptr_to_compat(from->si_addr);
3422 to->si_pkey = from->si_pkey;
3423 break;
3424 case SIL_FAULT_PERF_EVENT:
3425 to->si_addr = ptr_to_compat(from->si_addr);
3426 to->si_perf_data = from->si_perf_data;
3427 to->si_perf_type = from->si_perf_type;
3428 to->si_perf_flags = from->si_perf_flags;
3429 break;
3430 case SIL_CHLD:
3431 to->si_pid = from->si_pid;
3432 to->si_uid = from->si_uid;
3433 to->si_status = from->si_status;
3434 to->si_utime = from->si_utime;
3435 to->si_stime = from->si_stime;
3436 break;
3437 case SIL_RT:
3438 to->si_pid = from->si_pid;
3439 to->si_uid = from->si_uid;
3440 to->si_int = from->si_int;
3441 break;
3442 case SIL_SYS:
3443 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3444 to->si_syscall = from->si_syscall;
3445 to->si_arch = from->si_arch;
3446 break;
3447 }
3448 }
3449
3450 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3451 const struct kernel_siginfo *from)
3452 {
3453 struct compat_siginfo new;
3454
3455 copy_siginfo_to_external32(&new, from);
3456 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3457 return -EFAULT;
3458 return 0;
3459 }
3460
3461 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3462 const struct compat_siginfo *from)
3463 {
3464 clear_siginfo(to);
3465 to->si_signo = from->si_signo;
3466 to->si_errno = from->si_errno;
3467 to->si_code = from->si_code;
3468 switch (siginfo_layout(from->si_signo, from->si_code)) {
3469 case SIL_KILL:
3470 to->si_pid = from->si_pid;
3471 to->si_uid = from->si_uid;
3472 break;
3473 case SIL_TIMER:
3474 to->si_tid = from->si_tid;
3475 to->si_overrun = from->si_overrun;
3476 to->si_int = from->si_int;
3477 break;
3478 case SIL_POLL:
3479 to->si_band = from->si_band;
3480 to->si_fd = from->si_fd;
3481 break;
3482 case SIL_FAULT:
3483 to->si_addr = compat_ptr(from->si_addr);
3484 break;
3485 case SIL_FAULT_TRAPNO:
3486 to->si_addr = compat_ptr(from->si_addr);
3487 to->si_trapno = from->si_trapno;
3488 break;
3489 case SIL_FAULT_MCEERR:
3490 to->si_addr = compat_ptr(from->si_addr);
3491 to->si_addr_lsb = from->si_addr_lsb;
3492 break;
3493 case SIL_FAULT_BNDERR:
3494 to->si_addr = compat_ptr(from->si_addr);
3495 to->si_lower = compat_ptr(from->si_lower);
3496 to->si_upper = compat_ptr(from->si_upper);
3497 break;
3498 case SIL_FAULT_PKUERR:
3499 to->si_addr = compat_ptr(from->si_addr);
3500 to->si_pkey = from->si_pkey;
3501 break;
3502 case SIL_FAULT_PERF_EVENT:
3503 to->si_addr = compat_ptr(from->si_addr);
3504 to->si_perf_data = from->si_perf_data;
3505 to->si_perf_type = from->si_perf_type;
3506 to->si_perf_flags = from->si_perf_flags;
3507 break;
3508 case SIL_CHLD:
3509 to->si_pid = from->si_pid;
3510 to->si_uid = from->si_uid;
3511 to->si_status = from->si_status;
3512 #ifdef CONFIG_X86_X32_ABI
3513 if (in_x32_syscall()) {
3514 to->si_utime = from->_sifields._sigchld_x32._utime;
3515 to->si_stime = from->_sifields._sigchld_x32._stime;
3516 } else
3517 #endif
3518 {
3519 to->si_utime = from->si_utime;
3520 to->si_stime = from->si_stime;
3521 }
3522 break;
3523 case SIL_RT:
3524 to->si_pid = from->si_pid;
3525 to->si_uid = from->si_uid;
3526 to->si_int = from->si_int;
3527 break;
3528 case SIL_SYS:
3529 to->si_call_addr = compat_ptr(from->si_call_addr);
3530 to->si_syscall = from->si_syscall;
3531 to->si_arch = from->si_arch;
3532 break;
3533 }
3534 return 0;
3535 }
3536
3537 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3538 const struct compat_siginfo __user *ufrom)
3539 {
3540 struct compat_siginfo from;
3541
3542 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3543 return -EFAULT;
3544
3545 from.si_signo = signo;
3546 return post_copy_siginfo_from_user32(to, &from);
3547 }
3548
3549 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3550 const struct compat_siginfo __user *ufrom)
3551 {
3552 struct compat_siginfo from;
3553
3554 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3555 return -EFAULT;
3556
3557 return post_copy_siginfo_from_user32(to, &from);
3558 }
3559 #endif
3560
3561
3562
3563
3564
3565
3566
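/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */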
3567 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3568 const struct timespec64 *ts)
3569 {
3570 ktime_t *to = NULL, timeout = KTIME_MAX;
3571 struct task_struct *tsk = current;
3572 sigset_t mask = *which;
3573 enum pid_type type;
3574 int sig, ret = 0;
3575
3576 if (ts) {
3577 if (!timespec64_valid(ts))
3578 return -EINVAL;
3579 timeout = timespec64_to_ktime(*ts);
3580 to = &timeout;
3581 }
3582
3583
3584
3585
3586 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3587 signotset(&mask);
3588
3589 spin_lock_irq(&tsk->sighand->siglock);
3590 sig = dequeue_signal(tsk, &mask, info, &type);
3591 if (!sig && timeout) {
3592
3593
3594
3595
3596
3597
3598 tsk->real_blocked = tsk->blocked;
3599 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3600 recalc_sigpending();
3601 spin_unlock_irq(&tsk->sighand->siglock);
3602
3603 __set_current_state(TASK_INTERRUPTIBLE);
3604 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3605 HRTIMER_MODE_REL);
3606 spin_lock_irq(&tsk->sighand->siglock);
3607 __set_task_blocked(tsk, &tsk->real_blocked);
3608 sigemptyset(&tsk->real_blocked);
3609 sig = dequeue_signal(tsk, &mask, info, &type);
3610 }
3611 spin_unlock_irq(&tsk->sighand->siglock);
3612
3613 if (sig)
3614 return sig;
3615 return ret ? -EINTR : -EAGAIN;
3616 }
3617
3618
3619
3620
3621
3622
3623
3624
3625
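/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */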
3626 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3627 siginfo_t __user *, uinfo,
3628 const struct __kernel_timespec __user *, uts,
3629 size_t, sigsetsize)
3630 {
3631 sigset_t these;
3632 struct timespec64 ts;
3633 kernel_siginfo_t info;
3634 int ret;
3635
3636
3637 if (sigsetsize != sizeof(sigset_t))
3638 return -EINVAL;
3639
3640 if (copy_from_user(&these, uthese, sizeof(these)))
3641 return -EFAULT;
3642
3643 if (uts) {
3644 if (get_timespec64(&ts, uts))
3645 return -EFAULT;
3646 }
3647
3648 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3649
3650 if (ret > 0 && uinfo) {
3651 if (copy_siginfo_to_user(uinfo, &info))
3652 ret = -EFAULT;
3653 }
3654
3655 return ret;
3656 }
3657
3658 #ifdef CONFIG_COMPAT_32BIT_TIME
3659 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3660 siginfo_t __user *, uinfo,
3661 const struct old_timespec32 __user *, uts,
3662 size_t, sigsetsize)
3663 {
3664 sigset_t these;
3665 struct timespec64 ts;
3666 kernel_siginfo_t info;
3667 int ret;
3668
3669 if (sigsetsize != sizeof(sigset_t))
3670 return -EINVAL;
3671
3672 if (copy_from_user(&these, uthese, sizeof(these)))
3673 return -EFAULT;
3674
3675 if (uts) {
3676 if (get_old_timespec32(&ts, uts))
3677 return -EFAULT;
3678 }
3679
3680 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3681
3682 if (ret > 0 && uinfo) {
3683 if (copy_siginfo_to_user(uinfo, &info))
3684 ret = -EFAULT;
3685 }
3686
3687 return ret;
3688 }
3689 #endif
3690
3691 #ifdef CONFIG_COMPAT
3692 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3693 struct compat_siginfo __user *, uinfo,
3694 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3695 {
3696 sigset_t s;
3697 struct timespec64 t;
3698 kernel_siginfo_t info;
3699 long ret;
3700
3701 if (sigsetsize != sizeof(sigset_t))
3702 return -EINVAL;
3703
3704 if (get_compat_sigset(&s, uthese))
3705 return -EFAULT;
3706
3707 if (uts) {
3708 if (get_timespec64(&t, uts))
3709 return -EFAULT;
3710 }
3711
3712 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3713
3714 if (ret > 0 && uinfo) {
3715 if (copy_siginfo_to_user32(uinfo, &info))
3716 ret = -EFAULT;
3717 }
3718
3719 return ret;
3720 }
3721
3722 #ifdef CONFIG_COMPAT_32BIT_TIME
3723 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3724 struct compat_siginfo __user *, uinfo,
3725 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3726 {
3727 sigset_t s;
3728 struct timespec64 t;
3729 kernel_siginfo_t info;
3730 long ret;
3731
3732 if (sigsetsize != sizeof(sigset_t))
3733 return -EINVAL;
3734
3735 if (get_compat_sigset(&s, uthese))
3736 return -EFAULT;
3737
3738 if (uts) {
3739 if (get_old_timespec32(&t, uts))
3740 return -EFAULT;
3741 }
3742
3743 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3744
3745 if (ret > 0 && uinfo) {
3746 if (copy_siginfo_to_user32(uinfo, &info))
3747 ret = -EFAULT;
3748 }
3749
3750 return ret;
3751 }
3752 #endif
3753 #endif
3754
3755 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3756 {
3757 clear_siginfo(info);
3758 info->si_signo = sig;
3759 info->si_errno = 0;
3760 info->si_code = SI_USER;
3761 info->si_pid = task_tgid_vnr(current);
3762 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3763 }
3764
3765
3766
3767
3768
3769
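/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */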
3770 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3771 {
3772 struct kernel_siginfo info;
3773
3774 prepare_kill_siginfo(sig, &info);
3775
3776 return kill_something_info(sig, &info, pid);
3777 }
3778
3779
3780
3781
3782
3783
3784 static bool access_pidfd_pidns(struct pid *pid)
3785 {
3786 struct pid_namespace *active = task_active_pid_ns(current);
3787 struct pid_namespace *p = ns_of_pid(pid);
3788
3789 for (;;) {
3790 if (!p)
3791 return false;
3792 if (p == active)
3793 break;
3794 p = p->parent;
3795 }
3796
3797 return true;
3798 }
3799
3800 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3801 siginfo_t __user *info)
3802 {
3803 #ifdef CONFIG_COMPAT
3804
3805
3806
3807
3808
3809 if (in_compat_syscall())
3810 return copy_siginfo_from_user32(
3811 kinfo, (struct compat_siginfo __user *)info);
3812 #endif
3813 return copy_siginfo_from_user(kinfo, info);
3814 }
3815
3816 static struct pid *pidfd_to_pid(const struct file *file)
3817 {
3818 struct pid *pid;
3819
3820 pid = pidfd_pid(file);
3821 if (!IS_ERR(pid))
3822 return pid;
3823
3824 return tgid_pidfd_to_pid(file);
3825 }
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
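/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags to allow signaling threads or process groups
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 *
 * Return: 0 on success, negative errno on failure
 */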
3845 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3846 siginfo_t __user *, info, unsigned int, flags)
3847 {
3848 int ret;
3849 struct fd f;
3850 struct pid *pid;
3851 kernel_siginfo_t kinfo;
3852
3853
3854 if (flags)
3855 return -EINVAL;
3856
3857 f = fdget(pidfd);
3858 if (!f.file)
3859 return -EBADF;
3860
3861
3862 pid = pidfd_to_pid(f.file);
3863 if (IS_ERR(pid)) {
3864 ret = PTR_ERR(pid);
3865 goto err;
3866 }
3867
3868 ret = -EINVAL;
3869 if (!access_pidfd_pidns(pid))
3870 goto err;
3871
3872 if (info) {
3873 ret = copy_siginfo_from_user_any(&kinfo, info);
3874 if (unlikely(ret))
3875 goto err;
3876
3877 ret = -EINVAL;
3878 if (unlikely(sig != kinfo.si_signo))
3879 goto err;
3880
3881
3882 ret = -EPERM;
3883 if ((task_pid(current) != pid) &&
3884 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3885 goto err;
3886 } else {
3887 prepare_kill_siginfo(sig, &kinfo);
3888 }
3889
3890 ret = kill_pid_info(sig, &kinfo, pid);
3891
3892 err:
3893 fdput(f);
3894 return ret;
3895 }
3896
3897 static int
3898 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3899 {
3900 struct task_struct *p;
3901 int error = -ESRCH;
3902
3903 rcu_read_lock();
3904 p = find_task_by_vpid(pid);
3905 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3906 error = check_kill_permission(sig, info, p);
3907
3908
3909
3910
3911 if (!error && sig) {
3912 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3913
3914
3915
3916
3917
3918 if (unlikely(error == -ESRCH))
3919 error = 0;
3920 }
3921 }
3922 rcu_read_unlock();
3923
3924 return error;
3925 }
3926
3927 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3928 {
3929 struct kernel_siginfo info;
3930
3931 clear_siginfo(&info);
3932 info.si_signo = sig;
3933 info.si_errno = 0;
3934 info.si_code = SI_TKILL;
3935 info.si_pid = task_tgid_vnr(current);
3936 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3937
3938 return do_send_specific(tgid, pid, sig, &info);
3939 }
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
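/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks @tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */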
3951 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3952 {
3953
3954 if (pid <= 0 || tgid <= 0)
3955 return -EINVAL;
3956
3957 return do_tkill(tgid, pid, sig);
3958 }
3959
3960
3961
3962
3963
3964
3965
3966
3967 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3968 {
3969
3970 if (pid <= 0)
3971 return -EINVAL;
3972
3973 return do_tkill(0, pid, sig);
3974 }
3975
3976 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3977 {
3978
3979
3980
3981 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3982 (task_pid_vnr(current) != pid))
3983 return -EPERM;
3984
3985
3986 return kill_proc_info(sig, info, pid);
3987 }
3988
3989
3990
3991
3992
3993
3994
3995 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3996 siginfo_t __user *, uinfo)
3997 {
3998 kernel_siginfo_t info;
3999 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4000 if (unlikely(ret))
4001 return ret;
4002 return do_rt_sigqueueinfo(pid, sig, &info);
4003 }
4004
4005 #ifdef CONFIG_COMPAT
4006 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4007 compat_pid_t, pid,
4008 int, sig,
4009 struct compat_siginfo __user *, uinfo)
4010 {
4011 kernel_siginfo_t info;
4012 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4013 if (unlikely(ret))
4014 return ret;
4015 return do_rt_sigqueueinfo(pid, sig, &info);
4016 }
4017 #endif
4018
4019 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4020 {
4021
4022 if (pid <= 0 || tgid <= 0)
4023 return -EINVAL;
4024
4025
4026
4027
4028 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4029 (task_pid_vnr(current) != pid))
4030 return -EPERM;
4031
4032 return do_send_specific(tgid, pid, sig, info);
4033 }
4034
4035 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4036 siginfo_t __user *, uinfo)
4037 {
4038 kernel_siginfo_t info;
4039 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4040 if (unlikely(ret))
4041 return ret;
4042 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4043 }
4044
4045 #ifdef CONFIG_COMPAT
4046 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4047 compat_pid_t, tgid,
4048 compat_pid_t, pid,
4049 int, sig,
4050 struct compat_siginfo __user *, uinfo)
4051 {
4052 kernel_siginfo_t info;
4053 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4054 if (unlikely(ret))
4055 return ret;
4056 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4057 }
4058 #endif
4059
4060
4061
4062
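/*
 * For kthreads only; must not be used if the sighand struct is shared
 * with a user process (CLONE_SIGHAND). Installing SIG_IGN also flushes
 * any already-queued instances of @sig.
 */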
4063 void kernel_sigaction(int sig, __sighandler_t action)
4064 {
4065 spin_lock_irq(&current->sighand->siglock);
4066 current->sighand->action[sig - 1].sa.sa_handler = action;
4067 if (action == SIG_IGN) {
4068 sigset_t mask;
4069
4070 sigemptyset(&mask);
4071 sigaddset(&mask, sig);
4072
4073 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4074 flush_sigqueue_mask(&mask, &current->pending);
4075 recalc_sigpending();
4076 }
4077 spin_unlock_irq(&current->sighand->siglock);
4078 }
4079 EXPORT_SYMBOL(kernel_sigaction);
4080
4081 void __weak sigaction_compat_abi(struct k_sigaction *act,
4082 struct k_sigaction *oact)
4083 {
4084 }
4085
4086 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4087 {
4088 struct task_struct *p = current, *t;
4089 struct k_sigaction *k;
4090 sigset_t mask;
4091
4092 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4093 return -EINVAL;
4094
4095 k = &p->sighand->action[sig-1];
4096
4097 spin_lock_irq(&p->sighand->siglock);
4098 if (k->sa.sa_flags & SA_IMMUTABLE) {
4099 spin_unlock_irq(&p->sighand->siglock);
4100 return -EINVAL;
4101 }
4102 if (oact)
4103 *oact = *k;
4104
4105
4106
4107
4108
4109 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4110
4111
4112
4113
4114
4115
4116 if (act)
4117 act->sa.sa_flags &= UAPI_SA_FLAGS;
4118 if (oact)
4119 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4120
4121 sigaction_compat_abi(act, oact);
4122
4123 if (act) {
4124 sigdelsetmask(&act->sa.sa_mask,
4125 sigmask(SIGKILL) | sigmask(SIGSTOP));
4126 *k = *act;
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4139 sigemptyset(&mask);
4140 sigaddset(&mask, sig);
4141 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4142 for_each_thread(p, t)
4143 flush_sigqueue_mask(&mask, &t->pending);
4144 }
4145 }
4146
4147 spin_unlock_irq(&p->sighand->siglock);
4148 return 0;
4149 }
4150
4151 #ifdef CONFIG_DYNAMIC_SIGFRAME
4152 static inline void sigaltstack_lock(void)
4153 __acquires(&current->sighand->siglock)
4154 {
4155 spin_lock_irq(&current->sighand->siglock);
4156 }
4157
4158 static inline void sigaltstack_unlock(void)
4159 __releases(&current->sighand->siglock)
4160 {
4161 spin_unlock_irq(&current->sighand->siglock);
4162 }
4163 #else
4164 static inline void sigaltstack_lock(void) { }
4165 static inline void sigaltstack_unlock(void) { }
4166 #endif
4167
4168 static int
4169 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4170 size_t min_ss_size)
4171 {
4172 struct task_struct *t = current;
4173 int ret = 0;
4174
4175 if (oss) {
4176 memset(oss, 0, sizeof(stack_t));
4177 oss->ss_sp = (void __user *) t->sas_ss_sp;
4178 oss->ss_size = t->sas_ss_size;
4179 oss->ss_flags = sas_ss_flags(sp) |
4180 (current->sas_ss_flags & SS_FLAG_BITS);
4181 }
4182
4183 if (ss) {
4184 void __user *ss_sp = ss->ss_sp;
4185 size_t ss_size = ss->ss_size;
4186 unsigned ss_flags = ss->ss_flags;
4187 int ss_mode;
4188
4189 if (unlikely(on_sig_stack(sp)))
4190 return -EPERM;
4191
4192 ss_mode = ss_flags & ~SS_FLAG_BITS;
4193 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4194 ss_mode != 0))
4195 return -EINVAL;
4196
4197
4198
4199
4200
4201 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4202 t->sas_ss_size == ss_size &&
4203 t->sas_ss_flags == ss_flags)
4204 return 0;
4205
4206 sigaltstack_lock();
4207 if (ss_mode == SS_DISABLE) {
4208 ss_size = 0;
4209 ss_sp = NULL;
4210 } else {
4211 if (unlikely(ss_size < min_ss_size))
4212 ret = -ENOMEM;
4213 if (!sigaltstack_size_valid(ss_size))
4214 ret = -ENOMEM;
4215 }
4216 if (!ret) {
4217 t->sas_ss_sp = (unsigned long) ss_sp;
4218 t->sas_ss_size = ss_size;
4219 t->sas_ss_flags = ss_flags;
4220 }
4221 sigaltstack_unlock();
4222 }
4223 return ret;
4224 }
4225
4226 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4227 {
4228 stack_t new, old;
4229 int err;
4230 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4231 return -EFAULT;
4232 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4233 current_user_stack_pointer(),
4234 MINSIGSTKSZ);
4235 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4236 err = -EFAULT;
4237 return err;
4238 }
4239
4240 int restore_altstack(const stack_t __user *uss)
4241 {
4242 stack_t new;
4243 if (copy_from_user(&new, uss, sizeof(stack_t)))
4244 return -EFAULT;
4245 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4246 MINSIGSTKSZ);
4247
4248 return 0;
4249 }
4250
4251 int __save_altstack(stack_t __user *uss, unsigned long sp)
4252 {
4253 struct task_struct *t = current;
4254 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4255 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4256 __put_user(t->sas_ss_size, &uss->ss_size);
4257 return err;
4258 }
4259
4260 #ifdef CONFIG_COMPAT
4261 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4262 compat_stack_t __user *uoss_ptr)
4263 {
4264 stack_t uss, uoss;
4265 int ret;
4266
4267 if (uss_ptr) {
4268 compat_stack_t uss32;
4269 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4270 return -EFAULT;
4271 uss.ss_sp = compat_ptr(uss32.ss_sp);
4272 uss.ss_flags = uss32.ss_flags;
4273 uss.ss_size = uss32.ss_size;
4274 }
4275 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4276 compat_user_stack_pointer(),
4277 COMPAT_MINSIGSTKSZ);
4278 if (ret >= 0 && uoss_ptr) {
4279 compat_stack_t old;
4280 memset(&old, 0, sizeof(old));
4281 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4282 old.ss_flags = uoss.ss_flags;
4283 old.ss_size = uoss.ss_size;
4284 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4285 ret = -EFAULT;
4286 }
4287 return ret;
4288 }
4289
4290 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4291 const compat_stack_t __user *, uss_ptr,
4292 compat_stack_t __user *, uoss_ptr)
4293 {
4294 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4295 }
4296
4297 int compat_restore_altstack(const compat_stack_t __user *uss)
4298 {
4299 int err = do_compat_sigaltstack(uss, NULL);
4300
4301 return err == -EFAULT ? err : 0;
4302 }
4303
4304 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4305 {
4306 int err;
4307 struct task_struct *t = current;
4308 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4309 &uss->ss_sp) |
4310 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4311 __put_user(t->sas_ss_size, &uss->ss_size);
4312 return err;
4313 }
4314 #endif
4315
4316 #ifdef __ARCH_WANT_SYS_SIGPENDING
4317
4318
4319
4320
4321
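/**
 *  sys_sigpending - examine pending signals
 *  @uset: where mask of pending signal is returned
 */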
4322 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4323 {
4324 sigset_t set;
4325
4326 if (sizeof(old_sigset_t) > sizeof(*uset))
4327 return -EINVAL;
4328
4329 do_sigpending(&set);
4330
4331 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4332 return -EFAULT;
4333
4334 return 0;
4335 }
4336
4337 #ifdef CONFIG_COMPAT
4338 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4339 {
4340 sigset_t set;
4341
4342 do_sigpending(&set);
4343
4344 return put_user(set.sig[0], set32);
4345 }
4346 #endif
4347
4348 #endif
4349
4350 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
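/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */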
4361 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4362 old_sigset_t __user *, oset)
4363 {
4364 old_sigset_t old_set, new_set;
4365 sigset_t new_blocked;
4366
4367 old_set = current->blocked.sig[0];
4368
4369 if (nset) {
4370 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4371 return -EFAULT;
4372
4373 new_blocked = current->blocked;
4374
4375 switch (how) {
4376 case SIG_BLOCK:
4377 sigaddsetmask(&new_blocked, new_set);
4378 break;
4379 case SIG_UNBLOCK:
4380 sigdelsetmask(&new_blocked, new_set);
4381 break;
4382 case SIG_SETMASK:
4383 new_blocked.sig[0] = new_set;
4384 break;
4385 default:
4386 return -EINVAL;
4387 }
4388
4389 set_current_blocked(&new_blocked);
4390 }
4391
4392 if (oset) {
4393 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4394 return -EFAULT;
4395 }
4396
4397 return 0;
4398 }
4399 #endif
4400
4401 #ifndef CONFIG_ODD_RT_SIGACTION
4402
4403
4404
4405
4406
4407
4408
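/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */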
4409 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4410 const struct sigaction __user *, act,
4411 struct sigaction __user *, oact,
4412 size_t, sigsetsize)
4413 {
4414 struct k_sigaction new_sa, old_sa;
4415 int ret;
4416
4417
4418 if (sigsetsize != sizeof(sigset_t))
4419 return -EINVAL;
4420
4421 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4422 return -EFAULT;
4423
4424 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4425 if (ret)
4426 return ret;
4427
4428 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4429 return -EFAULT;
4430
4431 return 0;
4432 }
4433 #ifdef CONFIG_COMPAT
4434 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4435 const struct compat_sigaction __user *, act,
4436 struct compat_sigaction __user *, oact,
4437 compat_size_t, sigsetsize)
4438 {
4439 struct k_sigaction new_ka, old_ka;
4440 #ifdef __ARCH_HAS_SA_RESTORER
4441 compat_uptr_t restorer;
4442 #endif
4443 int ret;
4444
4445
4446 if (sigsetsize != sizeof(compat_sigset_t))
4447 return -EINVAL;
4448
4449 if (act) {
4450 compat_uptr_t handler;
4451 ret = get_user(handler, &act->sa_handler);
4452 new_ka.sa.sa_handler = compat_ptr(handler);
4453 #ifdef __ARCH_HAS_SA_RESTORER
4454 ret |= get_user(restorer, &act->sa_restorer);
4455 new_ka.sa.sa_restorer = compat_ptr(restorer);
4456 #endif
4457 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4458 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4459 if (ret)
4460 return -EFAULT;
4461 }
4462
4463 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4464 if (!ret && oact) {
4465 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4466 &oact->sa_handler);
4467 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4468 sizeof(oact->sa_mask));
4469 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4470 #ifdef __ARCH_HAS_SA_RESTORER
4471 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4472 &oact->sa_restorer);
4473 #endif
4474 }
4475 return ret;
4476 }
4477 #endif
4478 #endif
4479
4480 #ifdef CONFIG_OLD_SIGACTION
4481 SYSCALL_DEFINE3(sigaction, int, sig,
4482 const struct old_sigaction __user *, act,
4483 struct old_sigaction __user *, oact)
4484 {
4485 struct k_sigaction new_ka, old_ka;
4486 int ret;
4487
4488 if (act) {
4489 old_sigset_t mask;
4490 if (!access_ok(act, sizeof(*act)) ||
4491 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4492 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4493 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4494 __get_user(mask, &act->sa_mask))
4495 return -EFAULT;
4496 #ifdef __ARCH_HAS_KA_RESTORER
4497 new_ka.ka_restorer = NULL;
4498 #endif
4499 siginitset(&new_ka.sa.sa_mask, mask);
4500 }
4501
4502 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4503
4504 if (!ret && oact) {
4505 if (!access_ok(oact, sizeof(*oact)) ||
4506 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4507 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4508 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4509 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4510 return -EFAULT;
4511 }
4512
4513 return ret;
4514 }
4515 #endif
4516 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4517 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4518 const struct compat_old_sigaction __user *, act,
4519 struct compat_old_sigaction __user *, oact)
4520 {
4521 struct k_sigaction new_ka, old_ka;
4522 int ret;
4523 compat_old_sigset_t mask;
4524 compat_uptr_t handler, restorer;
4525
4526 if (act) {
4527 if (!access_ok(act, sizeof(*act)) ||
4528 __get_user(handler, &act->sa_handler) ||
4529 __get_user(restorer, &act->sa_restorer) ||
4530 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4531 __get_user(mask, &act->sa_mask))
4532 return -EFAULT;
4533
4534 #ifdef __ARCH_HAS_KA_RESTORER
4535 new_ka.ka_restorer = NULL;
4536 #endif
4537 new_ka.sa.sa_handler = compat_ptr(handler);
4538 new_ka.sa.sa_restorer = compat_ptr(restorer);
4539 siginitset(&new_ka.sa.sa_mask, mask);
4540 }
4541
4542 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4543
4544 if (!ret && oact) {
4545 if (!access_ok(oact, sizeof(*oact)) ||
4546 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4547 &oact->sa_handler) ||
4548 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4549 &oact->sa_restorer) ||
4550 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4551 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4552 return -EFAULT;
4553 }
4554 return ret;
4555 }
4556 #endif
4557
4558 #ifdef CONFIG_SGETMASK_SYSCALL
4559
4560
4561
4562
4563 SYSCALL_DEFINE0(sgetmask)
4564 {
4565
4566 return current->blocked.sig[0];
4567 }
4568
4569 SYSCALL_DEFINE1(ssetmask, int, newmask)
4570 {
4571 int old = current->blocked.sig[0];
4572 sigset_t newset;
4573
4574 siginitset(&newset, newmask);
4575 set_current_blocked(&newset);
4576
4577 return old;
4578 }
4579 #endif
4580
4581 #ifdef __ARCH_WANT_SYS_SIGNAL
4582
4583
4584
4585 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4586 {
4587 struct k_sigaction new_sa, old_sa;
4588 int ret;
4589
4590 new_sa.sa.sa_handler = handler;
4591 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4592 sigemptyset(&new_sa.sa.sa_mask);
4593
4594 ret = do_sigaction(sig, &new_sa, &old_sa);
4595
4596 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4597 }
4598 #endif
4599
4600 #ifdef __ARCH_WANT_SYS_PAUSE
4601
4602 SYSCALL_DEFINE0(pause)
4603 {
4604 while (!signal_pending(current)) {
4605 __set_current_state(TASK_INTERRUPTIBLE);
4606 schedule();
4607 }
4608 return -ERESTARTNOHAND;
4609 }
4610
4611 #endif
4612
4613 static int sigsuspend(sigset_t *set)
4614 {
4615 current->saved_sigmask = current->blocked;
4616 set_current_blocked(set);
4617
4618 while (!signal_pending(current)) {
4619 __set_current_state(TASK_INTERRUPTIBLE);
4620 schedule();
4621 }
4622 set_restore_sigmask();
4623 return -ERESTARTNOHAND;
4624 }
4625
4626
4627
4628
4629
4630
4631
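/**
 *  sys_rt_sigsuspend - replace the signal mask for a value with the
 *	@unewset value until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */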
4632 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4633 {
4634 sigset_t newset;
4635
4636
4637 if (sigsetsize != sizeof(sigset_t))
4638 return -EINVAL;
4639
4640 if (copy_from_user(&newset, unewset, sizeof(newset)))
4641 return -EFAULT;
4642 return sigsuspend(&newset);
4643 }
4644
4645 #ifdef CONFIG_COMPAT
4646 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4647 {
4648 sigset_t newset;
4649
4650
4651 if (sigsetsize != sizeof(sigset_t))
4652 return -EINVAL;
4653
4654 if (get_compat_sigset(&newset, unewset))
4655 return -EFAULT;
4656 return sigsuspend(&newset);
4657 }
4658 #endif
4659
4660 #ifdef CONFIG_OLD_SIGSUSPEND
4661 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4662 {
4663 sigset_t blocked;
4664 siginitset(&blocked, mask);
4665 return sigsuspend(&blocked);
4666 }
4667 #endif
4668 #ifdef CONFIG_OLD_SIGSUSPEND3
4669 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4670 {
4671 sigset_t blocked;
4672 siginitset(&blocked, mask);
4673 return sigsuspend(&blocked);
4674 }
4675 #endif
4676
4677 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4678 {
4679 return NULL;
4680 }
4681
4682 static inline void siginfo_buildtime_checks(void)
4683 {
4684 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4685
4686
4687 #define CHECK_OFFSET(field) \
4688 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4689
4690
4691 CHECK_OFFSET(si_pid);
4692 CHECK_OFFSET(si_uid);
4693
4694
4695 CHECK_OFFSET(si_tid);
4696 CHECK_OFFSET(si_overrun);
4697 CHECK_OFFSET(si_value);
4698
4699
4700 CHECK_OFFSET(si_pid);
4701 CHECK_OFFSET(si_uid);
4702 CHECK_OFFSET(si_value);
4703
4704
4705 CHECK_OFFSET(si_pid);
4706 CHECK_OFFSET(si_uid);
4707 CHECK_OFFSET(si_status);
4708 CHECK_OFFSET(si_utime);
4709 CHECK_OFFSET(si_stime);
4710
4711
4712 CHECK_OFFSET(si_addr);
4713 CHECK_OFFSET(si_trapno);
4714 CHECK_OFFSET(si_addr_lsb);
4715 CHECK_OFFSET(si_lower);
4716 CHECK_OFFSET(si_upper);
4717 CHECK_OFFSET(si_pkey);
4718 CHECK_OFFSET(si_perf_data);
4719 CHECK_OFFSET(si_perf_type);
4720 CHECK_OFFSET(si_perf_flags);
4721
4722
4723 CHECK_OFFSET(si_band);
4724 CHECK_OFFSET(si_fd);
4725
4726
4727 CHECK_OFFSET(si_call_addr);
4728 CHECK_OFFSET(si_syscall);
4729 CHECK_OFFSET(si_arch);
4730 #undef CHECK_OFFSET
4731
4732
4733 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4734 offsetof(struct siginfo, si_addr));
4735 if (sizeof(int) == sizeof(void __user *)) {
4736 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4737 sizeof(void __user *));
4738 } else {
4739 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4740 sizeof_field(struct siginfo, si_uid)) !=
4741 sizeof(void __user *));
4742 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4743 offsetof(struct siginfo, si_uid));
4744 }
4745 #ifdef CONFIG_COMPAT
4746 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4747 offsetof(struct compat_siginfo, si_addr));
4748 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4749 sizeof(compat_uptr_t));
4750 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4751 sizeof_field(struct siginfo, si_pid));
4752 #endif
4753 }
4754
4755 void __init signals_init(void)
4756 {
4757 siginfo_buildtime_checks();
4758
4759 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4760 }
4761
4762 #ifdef CONFIG_KGDB_KDB
4763 #include <linux/kdb.h>
4764
4765
4766
4767
4768
4769
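/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */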
4770 void kdb_send_sig(struct task_struct *t, int sig)
4771 {
4772 static struct task_struct *kdb_prev_t;
4773 int new_t, ret;
4774 if (!spin_trylock(&t->sighand->siglock)) {
4775 kdb_printf("Can't do kill command now.\n"
4776 "The sigmask lock is held somewhere else in "
4777 "the kernel, try again later\n");
4778 return;
4779 }
4780 new_t = kdb_prev_t != t;
4781 kdb_prev_t = t;
4782 if (!task_is_running(t) && new_t) {
4783 spin_unlock(&t->sighand->siglock);
4784 kdb_printf("Process is not RUNNING, sending a signal from "
4785 "kdb risks deadlock\n"
4786 "on the run queue locks. "
4787 "The signal has _not_ been sent.\n"
4788 "Reissue the kill command if you want to risk "
4789 "the deadlock.\n");
4790 return;
4791 }
4792 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4793 spin_unlock(&t->sighand->siglock);
4794 if (ret)
4795 kdb_printf("Failed to deliver signal %d to process %d.\n",
4796 sig, t->pid);
4797 else
4798 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4799 }
4800 #endif