// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:    Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *  Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *  Alan Cox    07 Sept 1997    Vmalloc internal stack as needed.
 *                  Cope with changing max_files.
 *  Al Viro     11 Oct 1998
 *      Graph may have cycles. That is, we can send the descriptor
 *      of foo to bar and vice versa. Current code chokes on that.
 *      Fix: move SCM_RIGHTS ones into the separate list and then
 *      skb_free() them all instead of doing explicit fput's.
 *      Another problem: since fput() may block, somebody may
 *      create a new unix_socket while we are in the middle of the
 *      sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *      upon the beginning and unmark non-junk ones.
 *
 *      [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *      sent to connect()'ed but still not accept()'ed sockets.
 *      Fixed. Old code had a slightly different problem here:
 *      an extra fput() when we passed the descriptor via
 *      such a socket and closed it (the descriptor). That would
 *      happen on each unix_gc() until the accept(). Since the struct
 *      file in question would go to the free list and might be reused...
 *      That might be the reason for random oopses on filp_close()
 *      in unrelated processes.
 *
 *  AV      28 Feb 1999
 *      Kill the explicit allocation of stack. Now we keep the tree
 *      with root in dummy + pointer (gc_current) to one of the nodes.
 *      Stack is represented as path from gc_current to dummy. Unmark
 *      now means "add to tree". Push == "make it a son of gc_current".
 *      Pop == "move gc_current to parent". We keep only pointers to
 *      parents (->gc_tree).
 *  AV      1 Mar 1999
 *      Damn. Added missing check for ->dead in listen queues scanning.
 *
 *  Miklos Szeredi 25 Jun 2007
 *      Reimplement with a cycle collecting algorithm. This should
 *      solve several problems with the previous code, like being racy
 *      wrt receive and holding up unrelated socket operations.
 */
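
/* The classic cycle this collector exists to reclaim: a process
 * creates a socketpair (a, b), sends a's descriptor over b and b's
 * descriptor over a via SCM_RIGHTS, then close()s both.  Each
 * struct file is then held alive only by an skb queued on the peer,
 * so plain reference counting can never free the pair.
 */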

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

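/* Walk the receive queue of 'x' and apply 'func' to the unix socket
 * behind every in-flight descriptor that is a GC candidate.  If
 * 'hitlist' is non-NULL, skbs whose descriptors reference candidates
 * are unlinked from the queue and collected onto it for later purging.
 */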
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
              struct sk_buff_head *hitlist)
{
    struct sk_buff *skb;
    struct sk_buff *next;

    spin_lock(&x->sk_receive_queue.lock);
    skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
        /* Do we have file descriptors? */
        if (UNIXCB(skb).fp) {
            bool hit = false;
            /* Process the descriptors of this socket */
            int nfd = UNIXCB(skb).fp->count;
            struct file **fp = UNIXCB(skb).fp->fp;

            while (nfd--) {
                /* Get the socket the fd matches if it indeed does so */
                struct sock *sk = unix_get_socket(*fp++);

                if (sk) {
                    struct unix_sock *u = unix_sk(sk);

                    /* Ignore non-candidates, they could
                     * have been added to the queues after
                     * starting the garbage collection
                     */
                    if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
                        hit = true;

                        func(u);
                    }
                }
            }
            if (hit && hitlist) {
                __skb_unlink(skb, &x->sk_receive_queue);
                __skb_queue_tail(hitlist, skb);
            }
        }
    }
    spin_unlock(&x->sk_receive_queue.lock);
}

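/* Apply 'func' to every in-flight descriptor reachable from 'x'.
 * For a non-listening socket that means its own receive queue; a
 * listening socket's receive queue holds embryo connections instead,
 * so each embryo's queue is scanned in its place.
 */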
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
              struct sk_buff_head *hitlist)
{
    if (x->sk_state != TCP_LISTEN) {
        scan_inflight(x, func, hitlist);
    } else {
        struct sk_buff *skb;
        struct sk_buff *next;
        struct unix_sock *u;
        LIST_HEAD(embryos);

        /* For a listening socket collect the queued embryos
         * and perform a scan on them as well.
         */
        spin_lock(&x->sk_receive_queue.lock);
        skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
            u = unix_sk(skb->sk);

            /* An embryo cannot be in-flight, so it's safe
             * to use the list link.
             */
            BUG_ON(!list_empty(&u->link));
            list_add_tail(&u->link, &embryos);
        }
        spin_unlock(&x->sk_receive_queue.lock);

        while (!list_empty(&embryos)) {
            u = list_entry(embryos.next, struct unix_sock, link);
            scan_inflight(&u->sk, func, hitlist);
            list_del_init(&u->link);
        }
    }
}

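/* Callbacks for scan_children().  u->inflight counts how many of a
 * socket's file references currently sit in queued SCM_RIGHTS
 * messages; the collector temporarily subtracts and then restores
 * these counts to isolate sockets kept alive only by a cycle.
 */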
static void dec_inflight(struct unix_sock *usk)
{
    atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
    atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
    atomic_long_inc(&u->inflight);
    /* If this still might be part of a cycle, move it to the end
     * of the list, so that it's checked even if it was already
     * passed over.
     */
    if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
        list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
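/* Once more than this many AF_UNIX descriptors are in flight,
 * wait_for_unix_gc() forces a collection directly rather than
 * waiting for one to be triggered elsewhere.
 */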
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
    /* If the number of inflight sockets is insane,
     * force a garbage collect right now.
     * Paired with the WRITE_ONCE() of unix_tot_inflight in
     * unix_inflight() and unix_notinflight(), and of
     * gc_in_progress in unix_gc().
     */
    if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
        !READ_ONCE(gc_in_progress))
        unix_gc();
    wait_event(unix_gc_wait, !gc_in_progress);
}

/* The external entry point: unix_gc() */
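/* The collection runs in passes under unix_gc_lock: pick the
 * candidates (in-flight sockets with no external file references),
 * subtract the references the candidates hold on each other, then
 * restore the counts of everything still reachable from outside.
 * Whatever remains is cyclic garbage, and the skbs carrying its
 * descriptors are purged.
 */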
void unix_gc(void)
{
    struct unix_sock *u;
    struct unix_sock *next;
    struct sk_buff_head hitlist;
    struct list_head cursor;
    LIST_HEAD(not_cycle_list);

    spin_lock(&unix_gc_lock);

    /* Avoid a recursive GC. */
    if (gc_in_progress)
        goto out;

    /* Paired with READ_ONCE() in wait_for_unix_gc(). */
    WRITE_ONCE(gc_in_progress, true);

    /* First, select candidates for garbage collection.  Only
     * in-flight sockets are considered, and from those only the
     * ones which don't have any external reference.
     *
     * Holding unix_gc_lock will protect these candidates from
     * being detached, and hence from gaining an external
     * reference.  Since there are no possible receivers, all
     * buffers currently on the candidates' queues stay there
     * during the garbage collection.
     *
     * We also know that no new candidate can be added onto the
     * receive queues.  Other, non-candidate sockets _can_ be
     * added to a queue, so we must make sure only to touch
     * candidates.
     */
    list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
        long total_refs;
        long inflight_refs;

        total_refs = file_count(u->sk.sk_socket->file);
        inflight_refs = atomic_long_read(&u->inflight);

        BUG_ON(inflight_refs < 1);
        BUG_ON(total_refs < inflight_refs);
        if (total_refs == inflight_refs) {
            list_move_tail(&u->link, &gc_candidates);
            __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
            __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
        }
    }

    /* Now remove all internal in-flight references to children of
     * the candidates.
     */
    list_for_each_entry(u, &gc_candidates, link)
        scan_children(&u->sk, dec_inflight, NULL);

    /* Restore the references for children of all candidates
     * which have remaining references.  Do this recursively, so
     * that only those which form cyclic references remain.
     *
     * Use a "cursor" link to make the list traversal safe, even
     * though elements might be moved about.
     */
    list_add(&cursor, &gc_candidates);
    while (cursor.next != &gc_candidates) {
        u = list_entry(cursor.next, struct unix_sock, link);

        /* Move cursor to after the current position. */
        list_move(&cursor, &u->link);

        if (atomic_long_read(&u->inflight) > 0) {
            list_move_tail(&u->link, &not_cycle_list);
            __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
            scan_children(&u->sk, inc_inflight_move_tail, NULL);
        }
    }
    list_del(&cursor);

    /* Now gc_candidates contains only garbage.  Restore the
     * original inflight counters for these as well, and remove
     * the skbuffs which are creating the cycle(s).
     */
    skb_queue_head_init(&hitlist);
    list_for_each_entry(u, &gc_candidates, link)
        scan_children(&u->sk, inc_inflight, &hitlist);

    /* not_cycle_list contains those sockets which do not make up a
     * cycle.  Restore these to the inflight list.
     */
    while (!list_empty(&not_cycle_list)) {
        u = list_entry(not_cycle_list.next, struct unix_sock, link);
        __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
        list_move_tail(&u->link, &gc_inflight_list);
    }

    spin_unlock(&unix_gc_lock);

    /* Here we are. Hitlist is filled. Die. */
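    /* The purge must run with unix_gc_lock dropped: freeing these
     * skbs invokes the SCM destructor, which calls unix_notinflight()
     * and takes unix_gc_lock itself.
     */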
    __skb_queue_purge(&hitlist);

    spin_lock(&unix_gc_lock);

    /* All candidates should have been detached by now. */
    BUG_ON(!list_empty(&gc_candidates));

    /* Paired with READ_ONCE() in wait_for_unix_gc(). */
    WRITE_ONCE(gc_in_progress, false);

    wake_up(&unix_gc_wait);

 out:
    spin_unlock(&unix_gc_lock);
}