// SPDX-License-Identifier: GPL-2.0
/*
 *     SUCS NET3:
 *
 *     Generic stream handling routines. These are generic for most
 *     protocols. Even IP. Tonight 8-).
 *     This is used because TCP, LLC (others too) layer all have mostly
 *     identical sendmsg() and recvmsg() code.
 *     So we (will) share it here.
 *
 *     Authors:        Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *                     (from old tcp.c code)
 *                     Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-))
 */

#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/net.h>
#include <linux/signal.h>
#include <linux/tcp.h>
#include <linux/wait.h>
#include <net/sock.h>

/**
 * sk_stream_write_space - stream socket write_space callback.
 * @sk: socket
 *
 * Called when send buffer space becomes available again: clears
 * SOCK_NOSPACE, wakes up tasks sleeping in poll()/write(), and
 * notifies fasync listeners unless the send side has been shut down.
 */
void sk_stream_write_space(struct sock *sk)
{
    struct socket *sock = sk->sk_socket;
    struct socket_wq *wq;

    if (__sk_stream_is_writeable(sk, 1) && sock) {
        clear_bit(SOCK_NOSPACE, &sock->flags);

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
            wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
                        EPOLLWRNORM | EPOLLWRBAND);
        if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
            sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
    }
}
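
/*
 * Usage sketch: a stream protocol typically installs
 * sk_stream_write_space() as its sk->sk_write_space callback from its
 * init handler, the way TCP does in tcp_init_sock().  The
 * my_proto_init_sock() name below is hypothetical.
 */
static int my_proto_init_sock(struct sock *sk)
{
    /* Let writers blocked in sk_stream_wait_memory() be woken as soon
     * as enough send buffer space is available again.
     */
    sk->sk_write_space = sk_stream_write_space;
    return 0;
}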

/**
 * sk_stream_wait_connect - Wait for a socket to get into the connected state
 * @sk: sock to wait on
 * @timeo_p: for how long to wait
 *
 * Must be called with the socket locked.
 */
int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
{
    DEFINE_WAIT_FUNC(wait, woken_wake_function);
    struct task_struct *tsk = current;
    int done;

    do {
        int err = sock_error(sk);
        if (err)
            return err;
        if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
            return -EPIPE;
        if (!*timeo_p)
            return -EAGAIN;
        if (signal_pending(tsk))
            return sock_intr_errno(*timeo_p);

        add_wait_queue(sk_sleep(sk), &wait);
        sk->sk_write_pending++;
        done = sk_wait_event(sk, timeo_p,
                     !sk->sk_err &&
                     !((1 << sk->sk_state) &
                       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)), &wait);
        remove_wait_queue(sk_sleep(sk), &wait);
        sk->sk_write_pending--;
    } while (!done);
    return 0;
}
EXPORT_SYMBOL(sk_stream_wait_connect);
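
/*
 * Usage sketch: a ->sendmsg() handler waits for the handshake to finish
 * before queueing data, as tcp_sendmsg_locked() does.  my_proto_sendmsg()
 * and the elided transmit step are hypothetical.
 */
static int my_proto_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
    long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
    int err;

    lock_sock(sk);

    /* Block (up to the send timeout) until ESTABLISHED or CLOSE_WAIT. */
    if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
        err = sk_stream_wait_connect(sk, &timeo);
        if (err)
            goto out_err;
    }

    /* ... copy data from msg into skbs and transmit them here ... */
    err = len;
out_err:
    release_sock(sk);
    return err;
}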

/**
 * sk_stream_closing - Return 1 if we still have things to send in our buffers.
 * @sk: socket to verify
 */
static inline int sk_stream_closing(struct sock *sk)
{
    return (1 << sk->sk_state) &
           (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
}

void sk_stream_wait_close(struct sock *sk, long timeout)
{
    if (timeout) {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(sk_sleep(sk), &wait);

        do {
            if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk), &wait))
                break;
        } while (!signal_pending(current) && timeout);

        remove_wait_queue(sk_sleep(sk), &wait);
    }
}
EXPORT_SYMBOL(sk_stream_wait_close);
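
/*
 * Usage sketch: a protocol's ->close() handler uses sk_stream_wait_close()
 * to linger until queued data has drained or the timeout expires, much as
 * __tcp_close() does.  my_proto_close() is a hypothetical example.
 */
static void my_proto_close(struct sock *sk, long timeout)
{
    lock_sock(sk);
    sk->sk_shutdown = SHUTDOWN_MASK;

    /* Sleep while the state is FIN_WAIT1/CLOSING/LAST_ACK, i.e. while
     * sk_stream_closing() says there is still data left to send.
     */
    sk_stream_wait_close(sk, timeout);

    /* ... protocol specific teardown continues here ... */
    release_sock(sk);
}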

/**
 * sk_stream_wait_memory - Wait for more memory for a socket
 * @sk: socket to wait for memory
 * @timeo_p: for how long
 */
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
    int err = 0;
    long vm_wait = 0;
    long current_timeo = *timeo_p;
    DEFINE_WAIT_FUNC(wait, woken_wake_function);

    if (sk_stream_memory_free(sk))
        current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;

    add_wait_queue(sk_sleep(sk), &wait);

    while (1) {
        sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
            goto do_error;
        if (!*timeo_p)
            goto do_eagain;
        if (signal_pending(current))
            goto do_interrupted;
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
        if (sk_stream_memory_free(sk) && !vm_wait)
            break;

        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        sk->sk_write_pending++;
        sk_wait_event(sk, &current_timeo, sk->sk_err ||
                          (sk->sk_shutdown & SEND_SHUTDOWN) ||
                          (sk_stream_memory_free(sk) &&
                          !vm_wait), &wait);
        sk->sk_write_pending--;

        if (vm_wait) {
            vm_wait -= current_timeo;
            current_timeo = *timeo_p;
            if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
                (current_timeo -= vm_wait) < 0)
                current_timeo = 0;
            vm_wait = 0;
        }
        *timeo_p = current_timeo;
    }
out:
    remove_wait_queue(sk_sleep(sk), &wait);
    return err;

do_error:
    err = -EPIPE;
    goto out;
do_eagain:
    /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
     * be generated later.
     * When TCP receives ACK packets that make room, tcp_check_space()
     * only calls tcp_new_space() if SOCK_NOSPACE is set.
     */
    set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
    err = -EAGAIN;
    goto out;
do_interrupted:
    err = sock_intr_errno(*timeo_p);
    goto out;
}
EXPORT_SYMBOL(sk_stream_wait_memory);
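
/*
 * Usage sketch: the classic ->sendmsg() send-buffer loop around
 * sk_stream_wait_memory(), mirroring the wait_for_space path in
 * tcp_sendmsg_locked().  my_proto_queue_data() and the elided copy step
 * are hypothetical; each iteration must consume part of msg.
 */
static int my_proto_queue_data(struct sock *sk, struct msghdr *msg, long *timeo)
{
    int err;

    while (msg_data_left(msg)) {
        if (!sk_stream_memory_free(sk)) {
            /* Ask the write_space callback to wake us once ACKs
             * free up send buffer space, then sleep here.
             */
            set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
            err = sk_stream_wait_memory(sk, timeo);
            if (err)
                return err;
            continue;
        }
        /* ... allocate an skb, copy from msg, queue and push it ... */
    }
    return 0;
}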

int sk_stream_error(struct sock *sk, int flags, int err)
{
    if (err == -EPIPE)
        err = sock_error(sk) ? : -EPIPE;
    if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
        send_sig(SIGPIPE, current, 0);
    return err;
}
EXPORT_SYMBOL(sk_stream_error);
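
/*
 * Usage sketch: the error exit of a ->sendmsg() handler, following the
 * out_err path of tcp_sendmsg_locked().  Partial progress is reported
 * before any error; otherwise sk_stream_error() maps -EPIPE onto the
 * pending socket error and raises SIGPIPE unless MSG_NOSIGNAL was set.
 * my_proto_sendmsg_done() is a hypothetical helper.
 */
static int my_proto_sendmsg_done(struct sock *sk, int flags, int copied, int err)
{
    if (copied)
        return copied;
    return sk_stream_error(sk, flags, err);
}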

void sk_stream_kill_queues(struct sock *sk)
{
    /* First the read buffer. */
    __skb_queue_purge(&sk->sk_receive_queue);

    /* Next, the write queue. */
    WARN_ON_ONCE(!skb_queue_empty(&sk->sk_write_queue));

    /* Account for returned memory. */
    sk_mem_reclaim_final(sk);

    WARN_ON_ONCE(sk->sk_wmem_queued);
    WARN_ON_ONCE(sk->sk_forward_alloc);

    /* It is _impossible_ for the backlog to contain anything
     * when we get here.  All user references to this socket
     * have gone away; only the net layer can touch it.
     */
}
EXPORT_SYMBOL(sk_stream_kill_queues);
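
/*
 * Usage sketch: a protocol's ->destroy() callback releases its queues
 * with sk_stream_kill_queues(), as tcp_v4_destroy_sock() does.
 * my_proto_destroy_sock() is a hypothetical example.
 */
static void my_proto_destroy_sock(struct sock *sk)
{
    /* ... stop timers and free protocol private state first ... */

    /* Purge the receive queue and reclaim any forward-allocated memory;
     * the write queue is expected to be empty by now.
     */
    sk_stream_kill_queues(sk);
}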