/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

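/*
 * Detach the dying task from its pid hashes and the global bookkeeping
 * lists. Called from __exit_signal() with tasklist_lock write-locked
 * and the siglock held.
 */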
static void __unhash_process(struct task_struct *p, bool group_dead)
{
    nr_threads--;
    detach_pid(p, PIDTYPE_PID);
    if (group_dead) {
        detach_pid(p, PIDTYPE_PGID);
        detach_pid(p, PIDTYPE_SID);

        list_del_rcu(&p->tasks);
        list_del_init(&p->sibling);
        __this_cpu_dec(process_counts);
    }
    list_del_rcu(&p->thread_group);
    list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
    struct signal_struct *sig = tsk->signal;
    bool group_dead = thread_group_leader(tsk);
    struct sighand_struct *sighand;
    struct tty_struct *uninitialized_var(tty);
    cputime_t utime, stime;

    sighand = rcu_dereference_check(tsk->sighand,
                    lockdep_tasklist_lock_is_held());
    spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
    posix_cpu_timers_exit(tsk);
    if (group_dead) {
        posix_cpu_timers_exit_group(tsk);
    } else {
        /*
         * This can only happen if the caller is de_thread().
         * FIXME: this is a temporary hack; we should teach
         * posix-cpu-timers to handle this case correctly.
         */
        if (unlikely(has_group_leader_pid(tsk)))
            posix_cpu_timers_exit_group(tsk);
    }
#endif

    if (group_dead) {
        tty = sig->tty;
        sig->tty = NULL;
    } else {
        /*
         * If there is any task waiting for the group exit
         * then notify it:
         */
        if (sig->notify_count > 0 && !--sig->notify_count)
            wake_up_process(sig->group_exit_task);

        if (tsk == sig->curr_target)
            sig->curr_target = next_thread(tsk);
    }

    add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
                  sizeof(unsigned long long));

    /*
     * Accumulate here the counters for all threads as they die. We could
     * skip the group leader because it is the last user of signal_struct,
     * but we want to avoid the race with thread_group_cputime() which can
     * see the empty ->thread_head list.
     */
    task_cputime(tsk, &utime, &stime);
    write_seqlock(&sig->stats_lock);
    sig->utime += utime;
    sig->stime += stime;
    sig->gtime += task_gtime(tsk);
    sig->min_flt += tsk->min_flt;
    sig->maj_flt += tsk->maj_flt;
    sig->nvcsw += tsk->nvcsw;
    sig->nivcsw += tsk->nivcsw;
    sig->inblock += task_io_get_inblock(tsk);
    sig->oublock += task_io_get_oublock(tsk);
    task_io_accounting_add(&sig->ioac, &tsk->ioac);
    sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
    sig->nr_threads--;
    __unhash_process(tsk, group_dead);
    write_sequnlock(&sig->stats_lock);

    /*
     * Do this under ->siglock, we can race with another thread
     * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
     */
    flush_sigqueue(&tsk->pending);
    tsk->sighand = NULL;
    spin_unlock(&sighand->siglock);

    __cleanup_sighand(sighand);
    clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
    if (group_dead) {
        flush_sigqueue(&sig->shared_pending);
        tty_kref_put(tty);
    }
}

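/*
 * RCU callback queued by release_task(): runs after a grace period, so
 * lockless task-list walkers can no longer see the task, then drops the
 * reference that was pinning its task_struct.
 */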
static void delayed_put_task_struct(struct rcu_head *rhp)
{
    struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

    perf_event_delayed_put(tsk);
    trace_sched_process_free(tsk);
    put_task_struct(tsk);
}


void release_task(struct task_struct *p)
{
    struct task_struct *leader;
    int zap_leader;
repeat:
    /* don't need to get the RCU readlock here - the process is dead and
     * can't be modifying its own credentials. But shut RCU-lockdep up */
    rcu_read_lock();
    atomic_dec(&__task_cred(p)->user->processes);
    rcu_read_unlock();

    proc_flush_task(p);

    write_lock_irq(&tasklist_lock);
    ptrace_release_task(p);
    __exit_signal(p);

    /*
     * If we are the last non-leader member of the thread group, and
     * the leader is a zombie, then notify the group leader's parent
     * process (if it wants notification).
     */
    zap_leader = 0;
    leader = p->group_leader;
    if (leader != p && thread_group_empty(leader)
            && leader->exit_state == EXIT_ZOMBIE) {
        /*
         * If we were the last child thread and the leader has
         * exited already, and the leader's parent ignores SIGCHLD,
         * then we are the one who should release the leader.
         */
        zap_leader = do_notify_parent(leader, leader->exit_signal);
        if (zap_leader)
            leader->exit_state = EXIT_DEAD;
    }

    write_unlock_irq(&tasklist_lock);
    release_thread(p);
    call_rcu(&p->rcu, delayed_put_task_struct);

    p = leader;
    if (unlikely(zap_leader))
        goto repeat;
}

/*
 * Note that if this function returns a valid task_struct pointer (!NULL)
 * task->usage must remain >0 for the duration of the RCU critical section.
 */
struct task_struct *task_rcu_dereference(struct task_struct **ptask)
{
    struct sighand_struct *sighand;
    struct task_struct *task;

    /*
     * We need to verify that release_task() was not called and thus
     * delayed_put_task_struct() can't run and drop the last reference
     * before rcu_read_unlock(). We check task->sighand != NULL,
     * but we can read the already freed and reused memory.
     */
retry:
    task = rcu_dereference(*ptask);
    if (!task)
        return NULL;

    probe_kernel_address(&task->sighand, sighand);

    /*
     * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
     * was already freed we can not miss the preceding update of this
     * pointer.
     */
    smp_rmb();
    if (unlikely(task != READ_ONCE(*ptask)))
        goto retry;

    /*
     * We've re-checked that "task == *ptask", now we have two different
     * cases:
     *
     * 1. This is actually the same task/task_struct. In this case
     *    sighand != NULL tells us it is still alive.
     *
     * 2. This is another task which got the same memory for task_struct.
     *    We can't know this of course, and we can not trust
     *    sighand != NULL.
     *
     *    In this case we actually return a random value, but this is
     *    correct.
     *
     *    If we return NULL, we can pretend that we noticed that *ptask
     *    was updated when the previous task exited. Or pretend that
     *    probe_kernel_address(&sighand) read NULL.
     *
     *    If we return the new task (because sighand is not NULL for any
     *    reason), this is fine too. This (new) task can't go away before
     *    another RCU grace period passes.
     *
     *    And note: we could even eliminate the false positive by
     *    re-reading task->sighand to avoid the false NULL. But this case
     *    is very unlikely, so we don't care.
     */
    if (!sighand)
        return NULL;

    return task;
}

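/*
 * Like task_rcu_dereference(), but also takes a reference on the task,
 * so the caller can keep using it after rcu_read_unlock().
 */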
struct task_struct *try_get_task_struct(struct task_struct **ptask)
{
    struct task_struct *task;

    rcu_read_lock();
    task = task_rcu_dereference(ptask);
    if (task)
        get_task_struct(task);
    rcu_read_unlock();

    return task;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
                    struct task_struct *ignored_task)
{
    struct task_struct *p;

    do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
        if ((p == ignored_task) ||
            (p->exit_state && thread_group_empty(p)) ||
            is_global_init(p->real_parent))
            continue;

        if (task_pgrp(p->real_parent) != pgrp &&
            task_session(p->real_parent) == task_session(p))
            return 0;
    } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

    return 1;
}

int is_current_pgrp_orphaned(void)
{
    int retval;

    read_lock(&tasklist_lock);
    retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
    read_unlock(&tasklist_lock);

    return retval;
}

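/*
 * Check whether the process group contains a thread group that is in
 * the job-control stopped state.
 */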
static bool has_stopped_jobs(struct pid *pgrp)
{
    struct task_struct *p;

    do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
        if (p->signal->flags & SIGNAL_STOP_STOPPED)
            return true;
    } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

    return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
    struct pid *pgrp = task_pgrp(tsk);
    struct task_struct *ignored_task = tsk;

    if (!parent)
        /* exit: our father is in a different pgrp than
         * we are and we were the only connection outside.
         */
        parent = tsk->real_parent;
    else
        /* reparent: our child is in a different pgrp than
         * we are, and it was the only connection outside.
         */
        ignored_task = NULL;

    if (task_pgrp(parent) != pgrp &&
        task_session(parent) == task_session(tsk) &&
        will_become_orphaned_pgrp(pgrp, ignored_task) &&
        has_stopped_jobs(pgrp)) {
        __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
        __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
    }
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting. If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
    struct task_struct *c, *g, *p = current;

retry:
    /*
     * If the exiting or execing task is not the owner, it's
     * someone else's problem.
     */
    if (mm->owner != p)
        return;
    /*
     * The current owner is exiting/execing and there are no other
     * candidates.  Do not leave the mm pointing to a possibly
     * freed task structure.
     */
    if (atomic_read(&mm->mm_users) <= 1) {
        mm->owner = NULL;
        return;
    }

    read_lock(&tasklist_lock);
    /*
     * Search in the children
     */
    list_for_each_entry(c, &p->children, sibling) {
        if (c->mm == mm)
            goto assign_new_owner;
    }

    /*
     * Search in the siblings
     */
    list_for_each_entry(c, &p->real_parent->children, sibling) {
        if (c->mm == mm)
            goto assign_new_owner;
    }

    /*
     * Search through everything else, we should not get here often.
     */
    for_each_process(g) {
        if (g->flags & PF_KTHREAD)
            continue;
        for_each_thread(g, c) {
            if (c->mm == mm)
                goto assign_new_owner;
            if (c->mm)
                break;
        }
    }
    read_unlock(&tasklist_lock);
    /*
     * We found no owner yet mm_users > 1: this implies that we are
     * most likely racing with swapoff (try_to_unuse()) or /proc or
     * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
     */
    mm->owner = NULL;
    return;

assign_new_owner:
    BUG_ON(c == p);
    get_task_struct(c);
    /*
     * The task_lock protects c->mm from changing.
     * We always want mm->owner->mm == mm
     */
    task_lock(c);
    /*
     * Delay read_unlock() till we have the task_lock()
     * to ensure that c does not slip away underneath us
     */
    read_unlock(&tasklist_lock);
    if (c->mm != mm) {
        task_unlock(c);
        put_task_struct(c);
        goto retry;
    }
    mm->owner = c;
    task_unlock(c);
    put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already.
 */
static void exit_mm(struct task_struct *tsk)
{
    struct mm_struct *mm = tsk->mm;
    struct core_state *core_state;

    mm_release(tsk, mm);
    if (!mm)
        return;
    sync_mm_rss(mm);
    /*
     * Serialize with any possible pending coredump.
     * We must hold mmap_sem around checking core_state
     * and clearing tsk->mm.  The core-inducing thread
     * will increment ->nr_threads for each thread in the
     * group with ->mm != NULL.
     */
    down_read(&mm->mmap_sem);
    core_state = mm->core_state;
    if (core_state) {
        struct core_thread self;

        up_read(&mm->mmap_sem);

        self.task = tsk;
        self.next = xchg(&core_state->dumper.next, &self);
        /*
         * Implies mb(), the result of xchg() must be visible
         * to core_state->dumper.
         */
        if (atomic_dec_and_test(&core_state->nr_threads))
            complete(&core_state->startup);

        for (;;) {
            set_task_state(tsk, TASK_UNINTERRUPTIBLE);
            if (!self.task) /* see coredump_finish() */
                break;
            freezable_schedule();
        }
        __set_task_state(tsk, TASK_RUNNING);
        down_read(&mm->mmap_sem);
    }
    atomic_inc(&mm->mm_count);
    BUG_ON(mm != tsk->active_mm);
    /* more a memory barrier than a real lock */
    task_lock(tsk);
    tsk->mm = NULL;
    up_read(&mm->mmap_sem);
    enter_lazy_tlb(mm, current);
    task_unlock(tsk);
    mm_update_next_owner(mm);
    mmput(mm);
    if (test_thread_flag(TIF_MEMDIE))
        exit_oom_victim();
}

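/*
 * Return a thread in @p's thread group that is not yet exiting, or NULL
 * if every thread already has PF_EXITING set.
 */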
static struct task_struct *find_alive_thread(struct task_struct *p)
{
    struct task_struct *t;

    for_each_thread(p, t) {
        if (!(t->flags & PF_EXITING))
            return t;
    }
    return NULL;
}

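/*
 * If the exiting @father is the pid namespace's child reaper, hand the
 * role to a surviving thread; with none left, zap the remaining tasks
 * in the namespace (or panic if it is the init namespace).
 */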
static struct task_struct *find_child_reaper(struct task_struct *father)
    __releases(&tasklist_lock)
    __acquires(&tasklist_lock)
{
    struct pid_namespace *pid_ns = task_active_pid_ns(father);
    struct task_struct *reaper = pid_ns->child_reaper;

    if (likely(reaper != father))
        return reaper;

    reaper = find_alive_thread(father);
    if (reaper) {
        pid_ns->child_reaper = reaper;
        return reaper;
    }

    write_unlock_irq(&tasklist_lock);
    if (unlikely(pid_ns == &init_pid_ns)) {
        panic("Attempted to kill init! exitcode=0x%08x\n",
            father->signal->group_exit_code ?: father->exit_code);
    }
    zap_pid_ns_processes(pid_ns);
    write_lock_irq(&tasklist_lock);

    return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
                       struct task_struct *child_reaper)
{
    struct task_struct *thread, *reaper;

    thread = find_alive_thread(father);
    if (thread)
        return thread;

    if (father->signal->has_child_subreaper) {
        /*
         * Find the first ->is_child_subreaper ancestor in our pid_ns.
         * We start from father to ensure we can not look into another
         * namespace, this is safe because all its threads are dead.
         */
        for (reaper = father;
             !same_thread_group(reaper, child_reaper);
             reaper = reaper->real_parent) {
            /* call_usermodehelper() descendants need this check */
            if (reaper == &init_task)
                break;
            if (!reaper->signal->is_child_subreaper)
                continue;
            thread = find_alive_thread(reaper);
            if (thread)
                return thread;
        }
    }

    return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
                struct list_head *dead)
{
    if (unlikely(p->exit_state == EXIT_DEAD))
        return;

    /* We don't want people slaying init. */
    p->exit_signal = SIGCHLD;

    /* If it has exited notify the new parent about this child's death. */
    if (!p->ptrace &&
        p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
        if (do_notify_parent(p, p->exit_signal)) {
            p->exit_state = EXIT_DEAD;
            list_add(&p->ptrace_entry, dead);
        }
    }

    kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *  as a result of our exiting, and if they have any stopped
 *  jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
                    struct list_head *dead)
{
    struct task_struct *p, *t, *reaper;

    if (unlikely(!list_empty(&father->ptraced)))
        exit_ptrace(father, dead);

    /* Can drop and reacquire tasklist_lock */
    reaper = find_child_reaper(father);
    if (list_empty(&father->children))
        return;

    reaper = find_new_reaper(father, reaper);
    list_for_each_entry(p, &father->children, sibling) {
        for_each_thread(p, t) {
            t->real_parent = reaper;
            BUG_ON((!t->ptrace) != (t->parent == father));
            if (likely(!t->ptrace))
                t->parent = t->real_parent;
            if (t->pdeath_signal)
                group_send_sig_info(t->pdeath_signal,
                            SEND_SIG_NOINFO, t);
        }
        /*
         * If this is a threaded reparent there is no need to
         * notify anyone anything has happened.
         */
        if (!same_thread_group(reaper, father))
            reparent_leader(father, p, dead);
    }
    list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us.
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
    bool autoreap;
    struct task_struct *p, *n;
    LIST_HEAD(dead);

    write_lock_irq(&tasklist_lock);
    forget_original_parent(tsk, &dead);

    if (group_dead)
        kill_orphaned_pgrp(tsk->group_leader, NULL);

    if (unlikely(tsk->ptrace)) {
        int sig = thread_group_leader(tsk) &&
                thread_group_empty(tsk) &&
                !ptrace_reparented(tsk) ?
            tsk->exit_signal : SIGCHLD;
        autoreap = do_notify_parent(tsk, sig);
    } else if (thread_group_leader(tsk)) {
        autoreap = thread_group_empty(tsk) &&
            do_notify_parent(tsk, tsk->exit_signal);
    } else {
        autoreap = true;
    }

    tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
    if (tsk->exit_state == EXIT_DEAD)
        list_add(&tsk->ptrace_entry, &dead);

    /* mt-exec, de_thread() is waiting for group leader */
    if (unlikely(tsk->signal->notify_count < 0))
        wake_up_process(tsk->signal->group_exit_task);
    write_unlock_irq(&tasklist_lock);

    list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
        list_del_init(&p->ptrace_entry);
        release_task(p);
    }
}

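/*
 * With CONFIG_DEBUG_STACK_USAGE, report each new high-water mark of
 * kernel stack usage as tasks exit.
 */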
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
    static DEFINE_SPINLOCK(low_water_lock);
    static int lowest_to_date = THREAD_SIZE;
    unsigned long free;

    free = stack_not_used(current);

    if (free >= lowest_to_date)
        return;

    spin_lock(&low_water_lock);
    if (free < lowest_to_date) {
        pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
            current->comm, task_pid_nr(current), free);
        lowest_to_date = free;
    }
    spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

void __noreturn do_exit(long code)
{
    struct task_struct *tsk = current;
    int group_dead;
    TASKS_RCU(int tasks_rcu_i);

    profile_task_exit(tsk);
    kcov_task_exit(tsk);

    WARN_ON(blk_needs_flush_plug(tsk));

    if (unlikely(in_interrupt()))
        panic("Aiee, killing interrupt handler!");
    if (unlikely(!tsk->pid))
        panic("Attempted to kill the idle task!");

    /*
     * If do_exit is called because this process oopsed, it's possible
     * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
     * continuing. Amongst other possible reasons, this is to prevent
     * mm_release()->clear_child_tid() from writing to a user-controlled
     * kernel address.
     */
    set_fs(USER_DS);

    ptrace_event(PTRACE_EVENT_EXIT, code);

    validate_creds_for_do_exit(tsk);

    /*
     * We're taking recursive faults here in do_exit. Safest is to just
     * leave this task alone and wait for reboot.
     */
    if (unlikely(tsk->flags & PF_EXITING)) {
        pr_alert("Fixing recursive fault but reboot is needed!\n");
        /*
         * We can do this unlocked here. The futex code uses
         * this flag just to verify whether the pi state
         * cleanup has been done or not. In the worst case it
         * loops once more. We pretend that the cleanup was
         * done as there is no way to return. Either the
         * OWNER_DIED bit is set by now or we push the blocked
         * task into the wait-forever nirvana as well.
         */
        tsk->flags |= PF_EXITPIDONE;
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule();
    }

    exit_signals(tsk);  /* sets PF_EXITING */
    /*
     * Ensure that all new tsk->pi_lock acquisitions must observe
     * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
     */
    smp_mb();
    /*
     * Ensure that we must observe the pi_state in exit_mm() ->
     * mm_release() -> exit_pi_state_list().
     */
    raw_spin_unlock_wait(&tsk->pi_lock);

    if (unlikely(in_atomic())) {
        pr_info("note: %s[%d] exited with preempt_count %d\n",
            current->comm, task_pid_nr(current),
            preempt_count());
        preempt_count_set(PREEMPT_ENABLED);
    }

    /* sync mm's RSS info before statistics gathering */
    if (tsk->mm)
        sync_mm_rss(tsk->mm);
    acct_update_integrals(tsk);
    group_dead = atomic_dec_and_test(&tsk->signal->live);
    if (group_dead) {
#ifdef CONFIG_POSIX_TIMERS
        hrtimer_cancel(&tsk->signal->real_timer);
        exit_itimers(tsk->signal);
#endif
        if (tsk->mm)
            setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
    }
    acct_collect(code, group_dead);
    if (group_dead)
        tty_audit_exit();
    audit_free(tsk);

    tsk->exit_code = code;
    taskstats_exit(tsk, group_dead);

    exit_mm(tsk);

    if (group_dead)
        acct_process();
    trace_sched_process_exit(tsk);

    exit_sem(tsk);
    exit_shm(tsk);
    exit_files(tsk);
    exit_fs(tsk);
    if (group_dead)
        disassociate_ctty(1);
    exit_task_namespaces(tsk);
    exit_task_work(tsk);
    exit_thread(tsk);

    /*
     * Flush inherited counters to the parent - before the parent
     * gets woken up by child-exit notifications.
     *
     * Because of cgroup mode, this must be called before cgroup_exit().
     */
    perf_event_exit_task(tsk);

    sched_autogroup_exit_task(tsk);
    cgroup_exit(tsk);

    /*
     * FIXME: do that only when needed, using sched_exit tracepoint
     */
    flush_ptrace_hw_breakpoint(tsk);

    TASKS_RCU(preempt_disable());
    TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
    TASKS_RCU(preempt_enable());
    exit_notify(tsk, group_dead);
    proc_exit_connector(tsk);
    mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
    if (unlikely(current->pi_state_cache))
        kfree(current->pi_state_cache);
#endif
    /*
     * Make sure we are holding no locks:
     */
    debug_check_no_locks_held();
    /*
     * We can do this unlocked here. The futex code uses this flag
     * just to verify whether the pi state cleanup has been done
     * or not. In the worst case it loops once more.
     */
    tsk->flags |= PF_EXITPIDONE;

    if (tsk->io_context)
        exit_io_context(tsk);

    if (tsk->splice_pipe)
        free_pipe_info(tsk->splice_pipe);

    if (tsk->task_frag.page)
        put_page(tsk->task_frag.page);

    validate_creds_for_do_exit(tsk);

    check_stack_usage();
    preempt_disable();
    if (tsk->nr_dirtied)
        __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
    exit_rcu();
    TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));

    do_task_dead();
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
    if (comp)
        complete(comp);

    do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

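/*
 * The exit status occupies bits 8-15 of the wait status word, which is
 * where the userspace W* status macros expect to find it.
 */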
SYSCALL_DEFINE1(exit, int, error_code)
{
    do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
    struct signal_struct *sig = current->signal;

    BUG_ON(exit_code & 0x80); /* core dumps don't get here */

    if (signal_group_exit(sig))
        exit_code = sig->group_exit_code;
    else if (!thread_group_empty(current)) {
        struct sighand_struct *const sighand = current->sighand;

        spin_lock_irq(&sighand->siglock);
        if (signal_group_exit(sig))
            /* Another thread got here before we took the lock.  */
            exit_code = sig->group_exit_code;
        else {
            sig->group_exit_code = exit_code;
            sig->flags = SIGNAL_GROUP_EXIT;
            zap_other_threads(current);
        }
        spin_unlock_irq(&sighand->siglock);
    }

    do_exit(exit_code);
    /* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
    do_group_exit((error_code & 0xff) << 8);
    /* NOTREACHED */
    return 0;
}

struct wait_opts {
    enum pid_type       wo_type;
    int         wo_flags;
    struct pid      *wo_pid;

    struct siginfo __user   *wo_info;
    int __user      *wo_stat;
    struct rusage __user    *wo_rusage;

    wait_queue_t        child_wait;
    int         notask_error;
};

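/*
 * Resolve the struct pid that do_wait() matches against: the task's own
 * pid for PIDTYPE_PID, otherwise the group leader's pgid or sid.
 */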
static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
    if (type != PIDTYPE_PID)
        task = task->group_leader;
    return task->pids[type].pid;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
    return  wo->wo_type == PIDTYPE_MAX ||
        task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
    if (!eligible_pid(wo, p))
        return 0;

    /*
     * Wait for all children (clone and not) if __WALL is set or
     * if it is traced by us.
     */
    if (ptrace || (wo->wo_flags & __WALL))
        return 1;

    /*
     * Otherwise, wait for clone children *only* if __WCLONE is set;
     * otherwise, wait for non-clone children *only*.
     *
     * Note: a "clone" child here is one that reports to its parent
     * using a signal other than SIGCHLD, or a non-leader thread which
     * we can only see if it is traced by us.
     */
    if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
        return 0;

    return 1;
}

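/*
 * Fill in the rusage and siginfo results for a wait that does not reap
 * the task, and drop the reference the caller took on @p.
 */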
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
                pid_t pid, uid_t uid, int why, int status)
{
    struct siginfo __user *infop;
    int retval = wo->wo_rusage
        ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

    put_task_struct(p);
    infop = wo->wo_info;
    if (infop) {
        if (!retval)
            retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval)
            retval = put_user(0, &infop->si_errno);
        if (!retval)
            retval = put_user((short)why, &infop->si_code);
        if (!retval)
            retval = put_user(pid, &infop->si_pid);
        if (!retval)
            retval = put_user(uid, &infop->si_uid);
        if (!retval)
            retval = put_user(status, &infop->si_status);
    }
    if (!retval)
        retval = pid;
    return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
    int state, retval, status;
    pid_t pid = task_pid_vnr(p);
    uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
    struct siginfo __user *infop;

    if (!likely(wo->wo_flags & WEXITED))
        return 0;

    if (unlikely(wo->wo_flags & WNOWAIT)) {
        int exit_code = p->exit_code;
        int why;

        get_task_struct(p);
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();

        if ((exit_code & 0x7f) == 0) {
            why = CLD_EXITED;
            status = exit_code >> 8;
        } else {
            why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
            status = exit_code & 0x7f;
        }
        return wait_noreap_copyout(wo, p, pid, uid, why, status);
    }
    /*
     * Move the task's state to DEAD/TRACE, only one thread can do this.
     */
    state = (ptrace_reparented(p) && thread_group_leader(p)) ?
        EXIT_TRACE : EXIT_DEAD;
    if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
        return 0;
    /*
     * We own this thread, nobody else can reap it.
     */
    read_unlock(&tasklist_lock);
    sched_annotate_sleep();

    /*
     * Check thread_group_leader() to exclude the traced sub-threads.
     */
    if (state == EXIT_DEAD && thread_group_leader(p)) {
        struct signal_struct *sig = p->signal;
        struct signal_struct *psig = current->signal;
        unsigned long maxrss;
        cputime_t tgutime, tgstime;

        /*
         * The resource counters for the group leader are in its
         * own task_struct.  Those for dead threads in the group
         * are in its signal_struct, as are those for the child
         * processes it has previously reaped.  All these
         * accumulate in the parent's signal_struct c* fields.
         *
         * We don't bother to take a lock here to protect these
         * p->signal fields because the whole thread group is dead
         * and nobody can change them.
         *
         * psig->stats_lock also protects us from our sub-threads
         * which can reap other children at the same time. Until
         * we change k_getrusage()-like users to rely on this lock
         * we have to take ->siglock as well.
         *
         * We use thread_group_cputime_adjusted() to get times for
         * the thread group, which consolidates times for all threads
         * in the group including the group leader.
         */
        thread_group_cputime_adjusted(p, &tgutime, &tgstime);
        spin_lock_irq(&current->sighand->siglock);
        write_seqlock(&psig->stats_lock);
        psig->cutime += tgutime + sig->cutime;
        psig->cstime += tgstime + sig->cstime;
        psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
        psig->cmin_flt +=
            p->min_flt + sig->min_flt + sig->cmin_flt;
        psig->cmaj_flt +=
            p->maj_flt + sig->maj_flt + sig->cmaj_flt;
        psig->cnvcsw +=
            p->nvcsw + sig->nvcsw + sig->cnvcsw;
        psig->cnivcsw +=
            p->nivcsw + sig->nivcsw + sig->cnivcsw;
        psig->cinblock +=
            task_io_get_inblock(p) +
            sig->inblock + sig->cinblock;
        psig->coublock +=
            task_io_get_oublock(p) +
            sig->oublock + sig->coublock;
        maxrss = max(sig->maxrss, sig->cmaxrss);
        if (psig->cmaxrss < maxrss)
            psig->cmaxrss = maxrss;
        task_io_accounting_add(&psig->ioac, &p->ioac);
        task_io_accounting_add(&psig->ioac, &sig->ioac);
        write_sequnlock(&psig->stats_lock);
        spin_unlock_irq(&current->sighand->siglock);
    }

    retval = wo->wo_rusage
        ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
    status = (p->signal->flags & SIGNAL_GROUP_EXIT)
        ? p->signal->group_exit_code : p->exit_code;
    if (!retval && wo->wo_stat)
        retval = put_user(status, wo->wo_stat);

    infop = wo->wo_info;
    if (!retval && infop)
        retval = put_user(SIGCHLD, &infop->si_signo);
    if (!retval && infop)
        retval = put_user(0, &infop->si_errno);
    if (!retval && infop) {
        int why;

        if ((status & 0x7f) == 0) {
            why = CLD_EXITED;
            status >>= 8;
        } else {
            why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
            status &= 0x7f;
        }
        retval = put_user((short)why, &infop->si_code);
        if (!retval)
            retval = put_user(status, &infop->si_status);
    }
    if (!retval && infop)
        retval = put_user(pid, &infop->si_pid);
    if (!retval && infop)
        retval = put_user(uid, &infop->si_uid);
    if (!retval)
        retval = pid;

    if (state == EXIT_TRACE) {
        write_lock_irq(&tasklist_lock);
        /* We dropped tasklist, ptracer could die and untrace */
        ptrace_unlink(p);

        /* If parent wants a zombie, don't release it now */
        state = EXIT_ZOMBIE;
        if (do_notify_parent(p, p->exit_signal))
            state = EXIT_DEAD;
        p->exit_state = state;
        write_unlock_irq(&tasklist_lock);
    }
    if (state == EXIT_DEAD)
        release_task(p);

    return retval;
}

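/*
 * Return a pointer to the exit code that a (ptrace-)stopped @p should
 * report, or NULL if @p has no stop event for this kind of wait.
 */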
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
    if (ptrace) {
        if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
            return &p->exit_code;
    } else {
        if (p->signal->flags & SIGNAL_STOP_STOPPED)
            return &p->signal->group_exit_code;
    }
    return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for @p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
                int ptrace, struct task_struct *p)
{
    struct siginfo __user *infop;
    int retval, exit_code, *p_code, why;
    uid_t uid = 0; /* unneeded, required by compiler */
    pid_t pid;

    /*
     * Traditionally we see ptrace'd stopped tasks regardless of options.
     */
    if (!ptrace && !(wo->wo_flags & WUNTRACED))
        return 0;

    if (!task_stopped_code(p, ptrace))
        return 0;

    exit_code = 0;
    spin_lock_irq(&p->sighand->siglock);

    p_code = task_stopped_code(p, ptrace);
    if (unlikely(!p_code))
        goto unlock_sig;

    exit_code = *p_code;
    if (!exit_code)
        goto unlock_sig;

    if (!unlikely(wo->wo_flags & WNOWAIT))
        *p_code = 0;

    uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
    spin_unlock_irq(&p->sighand->siglock);
    if (!exit_code)
        return 0;

    /*
     * Now we are pretty sure this task is interesting.
     * Make sure it doesn't get reaped out from under us while we
     * give up the lock and then examine it below.  We don't want to
     * keep holding onto the tasklist_lock while we call getrusage and
     * possibly take page faults for user memory.
     */
    get_task_struct(p);
    pid = task_pid_vnr(p);
    why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
    read_unlock(&tasklist_lock);
    sched_annotate_sleep();

    if (unlikely(wo->wo_flags & WNOWAIT))
        return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

    retval = wo->wo_rusage
        ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
    if (!retval && wo->wo_stat)
        retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

    infop = wo->wo_info;
    if (!retval && infop)
        retval = put_user(SIGCHLD, &infop->si_signo);
    if (!retval && infop)
        retval = put_user(0, &infop->si_errno);
    if (!retval && infop)
        retval = put_user((short)why, &infop->si_code);
    if (!retval && infop)
        retval = put_user(exit_code, &infop->si_status);
    if (!retval && infop)
        retval = put_user(pid, &infop->si_pid);
    if (!retval && infop)
        retval = put_user(uid, &infop->si_uid);
    if (!retval)
        retval = pid;
    put_task_struct(p);

    BUG_ON(!retval);
    return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
    int retval;
    pid_t pid;
    uid_t uid;

    if (!unlikely(wo->wo_flags & WCONTINUED))
        return 0;

    if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
        return 0;

    spin_lock_irq(&p->sighand->siglock);
    /* Re-check with the lock held.  */
    if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
        spin_unlock_irq(&p->sighand->siglock);
        return 0;
    }
    if (!unlikely(wo->wo_flags & WNOWAIT))
        p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
    uid = from_kuid_munged(current_user_ns(), task_uid(p));
    spin_unlock_irq(&p->sighand->siglock);

    pid = task_pid_vnr(p);
    get_task_struct(p);
    read_unlock(&tasklist_lock);
    sched_annotate_sleep();

    if (!wo->wo_info) {
        retval = wo->wo_rusage
            ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        put_task_struct(p);
        if (!retval && wo->wo_stat)
            retval = put_user(0xffff, wo->wo_stat);
        if (!retval)
            retval = pid;
    } else {
        retval = wait_noreap_copyout(wo, p, pid, uid,
                         CLD_CONTINUED, SIGCONT);
        BUG_ON(retval == 0);
    }

    return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                struct task_struct *p)
{
    /*
     * We can race with wait_task_zombie() from another thread.
     * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
     * can't confuse the checks below.
     */
    int exit_state = ACCESS_ONCE(p->exit_state);
    int ret;

    if (unlikely(exit_state == EXIT_DEAD))
        return 0;

    ret = eligible_child(wo, ptrace, p);
    if (!ret)
        return ret;

    ret = security_task_wait(p);
    if (unlikely(ret < 0)) {
        /*
         * If we have not yet seen any eligible child,
         * then let this error code replace -ECHILD.
         * A permission error will give the user a clue
         * to look for security policy problems, rather
         * than for mysterious wait bugs.
         */
        if (wo->notask_error)
            wo->notask_error = ret;
        return 0;
    }

    if (unlikely(exit_state == EXIT_TRACE)) {
        /*
         * ptrace == 0 means we are the natural parent. In this case
         * we should clear notask_error, debugger will notify us.
         */
        if (likely(!ptrace))
            wo->notask_error = 0;
        return 0;
    }

    if (likely(!ptrace) && unlikely(p->ptrace)) {
        /*
         * If it is traced by its real parent's group, just pretend
         * the caller is ptrace_do_wait() and reap this child if it
         * is zombie.
         *
         * This also hides group stop state from real parent; otherwise
         * a single stop can be reported twice as group and ptrace stop.
         * If a ptracer wants to distinguish these two events for its
         * own children it should create a separate process which takes
         * the role of real parent.
         */
        if (!ptrace_reparented(p))
            ptrace = 1;
    }

    /* slay zombie? */
    if (exit_state == EXIT_ZOMBIE) {
        /* we don't reap group leaders with subthreads */
        if (!delay_group_leader(p)) {
            /*
             * A zombie ptracee is only visible to its ptracer.
             * Notification and reaping will be cascaded to the
             * real parent when the ptracer detaches.
             */
            if (unlikely(ptrace) || likely(!p->ptrace))
                return wait_task_zombie(wo, p);
        }

        /*
         * Allow access to stopped/continued state via zombie by
         * falling through.  Clearing of notask_error is complex.
         *
         * When !@ptrace:
         *
         * If WEXITED is set, notask_error should naturally be
         * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
         * so, if there are live subthreads, there are events to
         * wait for.  If all subthreads are dead, it's still safe
         * to clear - this function will be called again in a finite
         * amount of time once all the subthreads are released and
         * will then return without clearing.
         *
         * When @ptrace:
         *
         * Stopped state is per-task and thus can't change once the
         * target task dies.  Only continued and exited can happen.
         * Clear notask_error if WCONTINUED | WEXITED.
         */
        if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
            wo->notask_error = 0;
    } else {
        /*
         * @p is alive and it's gonna stop, continue or exit, so
         * there always is something to wait for.
         */
        wo->notask_error = 0;
    }

    /*
     * Wait for stopped.  Depending on @ptrace, different stopped state
     * is used and the two don't interact with each other.
     */
    ret = wait_task_stopped(wo, ptrace, p);
    if (ret)
        return ret;

    /*
     * Wait for continued.  There's only one continued state and the
     * ptracer can consume it which can confuse the real parent.  Don't
     * use WCONTINUED from ptracer.  You don't need or want it.
     */
    return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
    struct task_struct *p;

    list_for_each_entry(p, &tsk->children, sibling) {
        int ret = wait_consider_task(wo, 0, p);

        if (ret)
            return ret;
    }

    return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
    struct task_struct *p;

    list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
        int ret = wait_consider_task(wo, 1, p);

        if (ret)
            return ret;
    }

    return 0;
}

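/*
 * Wake function for the ->wait_chldexit queue: wake the do_wait()
 * sleeper only if the child passed in @key is one it could wait for.
 */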
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
                int sync, void *key)
{
    struct wait_opts *wo = container_of(wait, struct wait_opts,
                        child_wait);
    struct task_struct *p = key;

    if (!eligible_pid(wo, p))
        return 0;

    if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
        return 0;

    return default_wake_function(wait, mode, sync, key);
}

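/*
 * Wake up @parent's do_wait() sleepers on behalf of @p; waiters that
 * cannot wait for @p are filtered out by child_wait_callback().
 */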
void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
    __wake_up_sync_key(&parent->signal->wait_chldexit,
                TASK_INTERRUPTIBLE, 1, p);
}

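/*
 * Core of the wait*() family of syscalls: scan our children (and, for a
 * ptracer, our ptracees) for a matching state change, and sleep until
 * one occurs unless WNOHANG is set.
 */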
static long do_wait(struct wait_opts *wo)
{
    struct task_struct *tsk;
    int retval;

    trace_sched_process_wait(wo->wo_pid);

    init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
    wo->child_wait.private = current;
    add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
    /*
     * If there is nothing that can match our criteria, just get out.
     * We will clear ->notask_error to zero if we see any child that
     * might later match our criteria, even if we are not able to reap
     * it yet.
     */
    wo->notask_error = -ECHILD;
    if ((wo->wo_type < PIDTYPE_MAX) &&
       (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
        goto notask;

    set_current_state(TASK_INTERRUPTIBLE);
    read_lock(&tasklist_lock);
    tsk = current;
    do {
        retval = do_wait_thread(wo, tsk);
        if (retval)
            goto end;

        retval = ptrace_do_wait(wo, tsk);
        if (retval)
            goto end;

        if (wo->wo_flags & __WNOTHREAD)
            break;
    } while_each_thread(current, tsk);
    read_unlock(&tasklist_lock);

notask:
    retval = wo->notask_error;
    if (!retval && !(wo->wo_flags & WNOHANG)) {
        retval = -ERESTARTSYS;
        if (!signal_pending(current)) {
            schedule();
            goto repeat;
        }
    }
end:
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
    return retval;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
        infop, int, options, struct rusage __user *, ru)
{
    struct wait_opts wo;
    struct pid *pid = NULL;
    enum pid_type type;
    long ret;

    if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
            __WNOTHREAD|__WCLONE|__WALL))
        return -EINVAL;
    if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
        return -EINVAL;

    switch (which) {
    case P_ALL:
        type = PIDTYPE_MAX;
        break;
    case P_PID:
        type = PIDTYPE_PID;
        if (upid <= 0)
            return -EINVAL;
        break;
    case P_PGID:
        type = PIDTYPE_PGID;
        if (upid <= 0)
            return -EINVAL;
        break;
    default:
        return -EINVAL;
    }

    if (type < PIDTYPE_MAX)
        pid = find_get_pid(upid);

    wo.wo_type  = type;
    wo.wo_pid   = pid;
    wo.wo_flags = options;
    wo.wo_info  = infop;
    wo.wo_stat  = NULL;
    wo.wo_rusage    = ru;
    ret = do_wait(&wo);

    if (ret > 0) {
        ret = 0;
    } else if (infop) {
        /*
         * For a WNOHANG return, clear out all the fields
         * we would set so the user can easily tell the
         * difference.
         */
        if (!ret)
            ret = put_user(0, &infop->si_signo);
        if (!ret)
            ret = put_user(0, &infop->si_errno);
        if (!ret)
            ret = put_user(0, &infop->si_code);
        if (!ret)
            ret = put_user(0, &infop->si_pid);
        if (!ret)
            ret = put_user(0, &infop->si_uid);
        if (!ret)
            ret = put_user(0, &infop->si_status);
    }

    put_pid(pid);
    return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
        int, options, struct rusage __user *, ru)
{
    struct wait_opts wo;
    struct pid *pid = NULL;
    enum pid_type type;
    long ret;

    if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
            __WNOTHREAD|__WCLONE|__WALL))
        return -EINVAL;

    if (upid == -1)
        type = PIDTYPE_MAX;
    else if (upid < 0) {
        type = PIDTYPE_PGID;
        pid = find_get_pid(-upid);
    } else if (upid == 0) {
        type = PIDTYPE_PGID;
        pid = get_task_pid(current, PIDTYPE_PGID);
    } else /* upid > 0 */ {
        type = PIDTYPE_PID;
        pid = find_get_pid(upid);
    }

    wo.wo_type  = type;
    wo.wo_pid   = pid;
    wo.wo_flags = options | WEXITED;
    wo.wo_info  = NULL;
    wo.wo_stat  = stat_addr;
    wo.wo_rusage    = ru;
    ret = do_wait(&wo);
    put_pid(pid);

    return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
    return sys_wait4(pid, stat_addr, options, NULL);
}

#endif