Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * common code for virtio vsock
0004  *
0005  * Copyright (C) 2013-2015 Red Hat, Inc.
0006  * Author: Asias He <asias@redhat.com>
0007  *         Stefan Hajnoczi <stefanha@redhat.com>
0008  */
0009 #include <linux/spinlock.h>
0010 #include <linux/module.h>
0011 #include <linux/sched/signal.h>
0012 #include <linux/ctype.h>
0013 #include <linux/list.h>
0014 #include <linux/virtio_vsock.h>
0015 #include <uapi/linux/vsockmon.h>
0016 
0017 #include <net/sock.h>
0018 #include <net/af_vsock.h>
0019 
0020 #define CREATE_TRACE_POINTS
0021 #include <trace/events/vsock_virtio_transport_common.h>
0022 
0023 /* How long to wait for graceful shutdown of a connection */
0024 #define VSOCK_CLOSE_TIMEOUT (8 * HZ)
0025 
0026 /* Threshold for detecting small packets to copy */
0027 #define GOOD_COPY_LEN  128
0028 
0029 static const struct virtio_transport *
0030 virtio_transport_get_ops(struct vsock_sock *vsk)
0031 {
0032     const struct vsock_transport *t = vsock_core_get_transport(vsk);
0033 
0034     if (WARN_ON(!t))
0035         return NULL;
0036 
0037     return container_of(t, struct virtio_transport, transport);
0038 }
0039 
/* Allocate and initialize a packet ready for transmission.
 *
 * All header fields are stored little-endian, as required by the
 * virtio-vsock wire format.  When @info->msg is set and @len > 0, @len
 * bytes are copied from the message iterator into a freshly allocated
 * payload buffer.  Returns NULL on allocation or copy failure.
 */
static struct virtio_vsock_pkt *
virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
               size_t len,
               u32 src_cid,
               u32 src_port,
               u32 dst_cid,
               u32 dst_port)
{
    struct virtio_vsock_pkt *pkt;
    int err;

    pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
    if (!pkt)
        return NULL;

    pkt->hdr.type       = cpu_to_le16(info->type);
    pkt->hdr.op     = cpu_to_le16(info->op);
    pkt->hdr.src_cid    = cpu_to_le64(src_cid);
    pkt->hdr.dst_cid    = cpu_to_le64(dst_cid);
    pkt->hdr.src_port   = cpu_to_le32(src_port);
    pkt->hdr.dst_port   = cpu_to_le32(dst_port);
    pkt->hdr.flags      = cpu_to_le32(info->flags);
    pkt->len        = len;
    pkt->hdr.len        = cpu_to_le32(len);
    pkt->reply      = info->reply;
    pkt->vsk        = info->vsk;

    if (info->msg && len > 0) {
        pkt->buf = kmalloc(len, GFP_KERNEL);
        if (!pkt->buf)
            goto out_pkt;

        pkt->buf_len = len;

        err = memcpy_from_msg(pkt->buf, info->msg, len);
        if (err)
            goto out;

        /* A SEQPACKET message may span several packets: tag the one
         * that drains the message with EOM, and carry the sender's
         * MSG_EOR as the record boundary.
         */
        if (msg_data_left(info->msg) == 0 &&
            info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
            pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);

            if (info->msg->msg_flags & MSG_EOR)
                pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
        }
    }

    trace_virtio_transport_alloc_pkt(src_cid, src_port,
                     dst_cid, dst_port,
                     len,
                     info->type,
                     info->op,
                     info->flags);

    return pkt;

out:
    kfree(pkt->buf);
out_pkt:
    kfree(pkt);
    return NULL;
}
0102 
/* Packet capture: build an skb for vsockmon taps.
 *
 * The skb layout is: af_vsockmon_hdr, raw virtio_vsock_hdr, payload.
 * Returns NULL if the skb cannot be allocated (capture is best-effort).
 */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
    struct virtio_vsock_pkt *pkt = opaque;
    struct af_vsockmon_hdr *hdr;
    struct sk_buff *skb;
    size_t payload_len;
    void *payload_buf;

    /* A packet could be split to fit the RX buffer, so we can retrieve
     * the payload length from the header and the buffer pointer taking
     * care of the offset in the original packet.
     */
    payload_len = le32_to_cpu(pkt->hdr.len);
    payload_buf = pkt->buf + pkt->off;

    skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
            GFP_ATOMIC);
    if (!skb)
        return NULL;

    hdr = skb_put(skb, sizeof(*hdr));

    /* pkt->hdr is little-endian so no need to byteswap here */
    hdr->src_cid = pkt->hdr.src_cid;
    hdr->src_port = pkt->hdr.src_port;
    hdr->dst_cid = pkt->hdr.dst_cid;
    hdr->dst_port = pkt->hdr.dst_port;

    hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
    hdr->len = cpu_to_le16(sizeof(pkt->hdr));
    memset(hdr->reserved, 0, sizeof(hdr->reserved));

    /* Map the virtio-vsock operation onto the generic vsockmon op. */
    switch (le16_to_cpu(pkt->hdr.op)) {
    case VIRTIO_VSOCK_OP_REQUEST:
    case VIRTIO_VSOCK_OP_RESPONSE:
        hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
        break;
    case VIRTIO_VSOCK_OP_RST:
    case VIRTIO_VSOCK_OP_SHUTDOWN:
        hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
        break;
    case VIRTIO_VSOCK_OP_RW:
        hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
        break;
    case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
    case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
        hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
        break;
    default:
        hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
        break;
    }

    skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));

    if (payload_len) {
        skb_put_data(skb, payload_buf, payload_len);
    }

    return skb;
}
0165 
0166 void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
0167 {
0168     if (pkt->tap_delivered)
0169         return;
0170 
0171     vsock_deliver_tap(virtio_transport_build_skb, pkt);
0172     pkt->tap_delivered = true;
0173 }
0174 EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
0175 
0176 static u16 virtio_transport_get_type(struct sock *sk)
0177 {
0178     if (sk->sk_type == SOCK_STREAM)
0179         return VIRTIO_VSOCK_TYPE_STREAM;
0180     else
0181         return VIRTIO_VSOCK_TYPE_SEQPACKET;
0182 }
0183 
/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 *
 * Builds a packet from @info and hands it to the transport's TX path.
 * Returns the number of payload bytes actually queued (which may be
 * less than requested due to credit), or a negative errno.
 */
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
                      struct virtio_vsock_pkt_info *info)
{
    u32 src_cid, src_port, dst_cid, dst_port;
    const struct virtio_transport *t_ops;
    struct virtio_vsock_sock *vvs;
    struct virtio_vsock_pkt *pkt;
    u32 pkt_len = info->pkt_len;

    info->type = virtio_transport_get_type(sk_vsock(vsk));

    t_ops = virtio_transport_get_ops(vsk);
    if (unlikely(!t_ops))
        return -EFAULT;

    /* Destination defaults to the connected peer unless @info overrides
     * it (used for replies addressed to arbitrary endpoints).
     */
    src_cid = t_ops->transport.get_local_cid();
    src_port = vsk->local_addr.svm_port;
    if (!info->remote_cid) {
        dst_cid = vsk->remote_addr.svm_cid;
        dst_port = vsk->remote_addr.svm_port;
    } else {
        dst_cid = info->remote_cid;
        dst_port = info->remote_port;
    }

    vvs = vsk->trans;

    /* we can send less than pkt_len bytes */
    if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
        pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

    /* virtio_transport_get_credit might return less than pkt_len credit */
    pkt_len = virtio_transport_get_credit(vvs, pkt_len);

    /* Do not send zero length OP_RW pkt */
    if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
        return pkt_len;

    pkt = virtio_transport_alloc_pkt(info, pkt_len,
                     src_cid, src_port,
                     dst_cid, dst_port);
    if (!pkt) {
        /* Give back the credit reserved above. */
        virtio_transport_put_credit(vvs, pkt_len);
        return -ENOMEM;
    }

    virtio_transport_inc_tx_pkt(vvs, pkt);

    return t_ops->send_pkt(pkt);
}
0239 
0240 static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
0241                     struct virtio_vsock_pkt *pkt)
0242 {
0243     if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
0244         return false;
0245 
0246     vvs->rx_bytes += pkt->len;
0247     return true;
0248 }
0249 
0250 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
0251                     struct virtio_vsock_pkt *pkt)
0252 {
0253     vvs->rx_bytes -= pkt->len;
0254     vvs->fwd_cnt += pkt->len;
0255 }
0256 
/* Stamp an outgoing packet with our current receive-side credit state
 * (fwd_cnt/buf_alloc) so the peer learns how much it may send.
 * rx_lock protects fwd_cnt and buf_alloc; last_fwd_cnt records what we
 * last advertised, for the credit-update throttling on the RX path.
 */
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
{
    spin_lock_bh(&vvs->rx_lock);
    vvs->last_fwd_cnt = vvs->fwd_cnt;
    pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
    pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
    spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
0266 
0267 u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
0268 {
0269     u32 ret;
0270 
0271     spin_lock_bh(&vvs->tx_lock);
0272     ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
0273     if (ret > credit)
0274         ret = credit;
0275     vvs->tx_cnt += ret;
0276     spin_unlock_bh(&vvs->tx_lock);
0277 
0278     return ret;
0279 }
0280 EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
0281 
/* Return unused transmit credit previously reserved with
 * virtio_transport_get_credit().
 */
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
    spin_lock_bh(&vvs->tx_lock);
    vvs->tx_cnt -= credit;
    spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
0289 
/* Send a CREDIT_UPDATE control packet advertising our current receive
 * credit (filled in by virtio_transport_inc_tx_pkt() on the way out).
 */
static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
    struct virtio_vsock_pkt_info info = {
        .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
        .vsk = vsk,
    };

    return virtio_transport_send_pkt_info(vsk, &info);
}
0299 
/* MSG_PEEK path: copy up to @len queued bytes to @msg without consuming
 * them (rx_queue, offsets and counters are left untouched).
 *
 * Returns the number of bytes copied, or a negative errno if nothing
 * was copied before a fault.
 */
static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
                struct msghdr *msg,
                size_t len)
{
    struct virtio_vsock_sock *vvs = vsk->trans;
    struct virtio_vsock_pkt *pkt;
    size_t bytes, total = 0, off;
    int err = -EFAULT;

    spin_lock_bh(&vvs->rx_lock);

    list_for_each_entry(pkt, &vvs->rx_queue, list) {
        /* Start from the packet's unread portion; 'off' is a local
         * copy so the packet itself is not consumed.
         */
        off = pkt->off;

        if (total == len)
            break;

        while (total < len && off < pkt->len) {
            bytes = len - total;
            if (bytes > pkt->len - off)
                bytes = pkt->len - off;

            /* sk_lock is held by caller so no one else can dequeue.
             * Unlock rx_lock since memcpy_to_msg() may sleep.
             */
            spin_unlock_bh(&vvs->rx_lock);

            err = memcpy_to_msg(msg, pkt->buf + off, bytes);
            if (err)
                goto out;

            spin_lock_bh(&vvs->rx_lock);

            total += bytes;
            off += bytes;
        }
    }

    spin_unlock_bh(&vvs->rx_lock);

    return total;

out:
    /* Partial copies are reported as success for what was copied. */
    if (total)
        err = total;
    return err;
}
0348 
/* Consume up to @len queued bytes into @msg, freeing fully-drained
 * packets and updating the credit counters.  May send a CREDIT_UPDATE
 * to the peer when local free space runs low.
 *
 * Returns bytes copied, or a negative errno if nothing was copied
 * before a fault.
 */
static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
                   struct msghdr *msg,
                   size_t len)
{
    struct virtio_vsock_sock *vvs = vsk->trans;
    struct virtio_vsock_pkt *pkt;
    size_t bytes, total = 0;
    u32 free_space;
    int err = -EFAULT;

    spin_lock_bh(&vvs->rx_lock);
    while (total < len && !list_empty(&vvs->rx_queue)) {
        pkt = list_first_entry(&vvs->rx_queue,
                       struct virtio_vsock_pkt, list);

        bytes = len - total;
        if (bytes > pkt->len - pkt->off)
            bytes = pkt->len - pkt->off;

        /* sk_lock is held by caller so no one else can dequeue.
         * Unlock rx_lock since memcpy_to_msg() may sleep.
         */
        spin_unlock_bh(&vvs->rx_lock);

        err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
        if (err)
            goto out;

        spin_lock_bh(&vvs->rx_lock);

        total += bytes;
        pkt->off += bytes;
        /* Fully consumed: account it and release the packet. */
        if (pkt->off == pkt->len) {
            virtio_transport_dec_rx_pkt(vvs, pkt);
            list_del(&pkt->list);
            virtio_transport_free_pkt(pkt);
        }
    }

    /* Space not yet re-advertised to the peer since our last update. */
    free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);

    spin_unlock_bh(&vvs->rx_lock);

    /* To reduce the number of credit update messages,
     * don't update credits as long as lots of space is available.
     * Note: the limit chosen here is arbitrary. Setting the limit
     * too high causes extra messages. Too low causes transmitter
     * stalls. As stalls are in theory more expensive than extra
     * messages, we set the limit to a high value. TODO: experiment
     * with different values.
     */
    if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
        virtio_transport_send_credit_update(vsk);

    return total;

out:
    if (total)
        err = total;
    return err;
}
0411 
/* Dequeue exactly one SEQPACKET message (all fragments up to and
 * including the EOM-flagged packet).  Bytes beyond the user buffer are
 * dropped, per SEQPACKET truncation semantics; all fragments are freed
 * either way.
 *
 * Returns the full message length, 0 if no complete message is queued,
 * or a negative errno if the copy faulted.
 */
static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
                         struct msghdr *msg,
                         int flags)
{
    struct virtio_vsock_sock *vvs = vsk->trans;
    struct virtio_vsock_pkt *pkt;
    int dequeued_len = 0;
    size_t user_buf_len = msg_data_left(msg);
    bool msg_ready = false;

    spin_lock_bh(&vvs->rx_lock);

    if (vvs->msg_count == 0) {
        spin_unlock_bh(&vvs->rx_lock);
        return 0;
    }

    while (!msg_ready) {
        pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);

        /* Once a copy fails, dequeued_len holds the error and the
         * remaining fragments are only drained, not copied.
         */
        if (dequeued_len >= 0) {
            size_t pkt_len;
            size_t bytes_to_copy;

            pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
            bytes_to_copy = min(user_buf_len, pkt_len);

            if (bytes_to_copy) {
                int err;

                /* sk_lock is held by caller so no one else can dequeue.
                 * Unlock rx_lock since memcpy_to_msg() may sleep.
                 */
                spin_unlock_bh(&vvs->rx_lock);

                err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
                if (err) {
                    /* Copy of message failed. Rest of
                     * fragments will be freed without copy.
                     */
                    dequeued_len = err;
                } else {
                    user_buf_len -= bytes_to_copy;
                }

                spin_lock_bh(&vvs->rx_lock);
            }

            if (dequeued_len >= 0)
                dequeued_len += pkt_len;
        }

        /* EOM marks the last fragment of the message. */
        if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
            msg_ready = true;
            vvs->msg_count--;

            if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
                msg->msg_flags |= MSG_EOR;
        }

        virtio_transport_dec_rx_pkt(vvs, pkt);
        list_del(&pkt->list);
        virtio_transport_free_pkt(pkt);
    }

    spin_unlock_bh(&vvs->rx_lock);

    virtio_transport_send_credit_update(vsk);

    return dequeued_len;
}
0483 
0484 ssize_t
0485 virtio_transport_stream_dequeue(struct vsock_sock *vsk,
0486                 struct msghdr *msg,
0487                 size_t len, int flags)
0488 {
0489     if (flags & MSG_PEEK)
0490         return virtio_transport_stream_do_peek(vsk, msg, len);
0491     else
0492         return virtio_transport_stream_do_dequeue(vsk, msg, len);
0493 }
0494 EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
0495 
0496 ssize_t
0497 virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
0498                    struct msghdr *msg,
0499                    int flags)
0500 {
0501     if (flags & MSG_PEEK)
0502         return -EOPNOTSUPP;
0503 
0504     return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
0505 }
0506 EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
0507 
0508 int
0509 virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
0510                    struct msghdr *msg,
0511                    size_t len)
0512 {
0513     struct virtio_vsock_sock *vvs = vsk->trans;
0514 
0515     spin_lock_bh(&vvs->tx_lock);
0516 
0517     if (len > vvs->peer_buf_alloc) {
0518         spin_unlock_bh(&vvs->tx_lock);
0519         return -EMSGSIZE;
0520     }
0521 
0522     spin_unlock_bh(&vvs->tx_lock);
0523 
0524     return virtio_transport_stream_enqueue(vsk, msg, len);
0525 }
0526 EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
0527 
/* Datagram sockets are not supported by the virtio transport. */
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
                   struct msghdr *msg,
                   size_t len, int flags)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
0536 
0537 s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
0538 {
0539     struct virtio_vsock_sock *vvs = vsk->trans;
0540     s64 bytes;
0541 
0542     spin_lock_bh(&vvs->rx_lock);
0543     bytes = vvs->rx_bytes;
0544     spin_unlock_bh(&vvs->rx_lock);
0545 
0546     return bytes;
0547 }
0548 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
0549 
0550 u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
0551 {
0552     struct virtio_vsock_sock *vvs = vsk->trans;
0553     u32 msg_count;
0554 
0555     spin_lock_bh(&vvs->rx_lock);
0556     msg_count = vvs->msg_count;
0557     spin_unlock_bh(&vvs->rx_lock);
0558 
0559     return msg_count;
0560 }
0561 EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);
0562 
0563 static s64 virtio_transport_has_space(struct vsock_sock *vsk)
0564 {
0565     struct virtio_vsock_sock *vvs = vsk->trans;
0566     s64 bytes;
0567 
0568     bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
0569     if (bytes < 0)
0570         bytes = 0;
0571 
0572     return bytes;
0573 }
0574 
0575 s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
0576 {
0577     struct virtio_vsock_sock *vvs = vsk->trans;
0578     s64 bytes;
0579 
0580     spin_lock_bh(&vvs->tx_lock);
0581     bytes = virtio_transport_has_space(vsk);
0582     spin_unlock_bh(&vvs->tx_lock);
0583 
0584     return bytes;
0585 }
0586 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
0587 
/* Allocate and attach the per-socket virtio transport state.
 *
 * @psk is the parent (listener) socket for accepted connections; its
 * known peer credit is inherited so the child can transmit before the
 * first credit update arrives.  Returns 0 or -ENOMEM.
 */
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
                    struct vsock_sock *psk)
{
    struct virtio_vsock_sock *vvs;

    vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
    if (!vvs)
        return -ENOMEM;

    vsk->trans = vvs;
    vvs->vsk = vsk;
    if (psk && psk->trans) {
        struct virtio_vsock_sock *ptrans = psk->trans;

        vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
    }

    /* Cap the receive buffer at the transport maximum. */
    if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
        vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

    vvs->buf_alloc = vsk->buffer_size;

    spin_lock_init(&vvs->rx_lock);
    spin_lock_init(&vvs->tx_lock);
    INIT_LIST_HEAD(&vvs->rx_queue);

    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
0617 
/* sk_lock held by the caller */
/* Apply a new receive buffer size (clamped to the transport maximum,
 * clamp written back through @val) and advertise the change to the
 * peer with a credit update.
 */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
    struct virtio_vsock_sock *vvs = vsk->trans;

    if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
        *val = VIRTIO_VSOCK_MAX_BUF_SIZE;

    vvs->buf_alloc = *val;

    virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);
0631 
0632 int
0633 virtio_transport_notify_poll_in(struct vsock_sock *vsk,
0634                 size_t target,
0635                 bool *data_ready_now)
0636 {
0637     if (vsock_stream_has_data(vsk))
0638         *data_ready_now = true;
0639     else
0640         *data_ready_now = false;
0641 
0642     return 0;
0643 }
0644 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
0645 
/* Poll callback: report writability based on available peer credit.
 *
 * NOTE(review): when free_space is negative, *space_avail_now is left
 * unmodified — presumably callers pre-initialize it; confirm against
 * af_vsock poll code.
 */
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
                 size_t target,
                 bool *space_avail_now)
{
    s64 free_space;

    free_space = vsock_stream_has_space(vsk);
    if (free_space > 0)
        *space_avail_now = true;
    else if (free_space == 0)
        *space_avail_now = false;

    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
0662 
/* No per-receive setup is needed for virtio transports. */
int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
    size_t target, struct vsock_transport_recv_notify_data *data)
{
    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
0669 
/* No action required before a receiver blocks. */
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
    size_t target, struct vsock_transport_recv_notify_data *data)
{
    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
0676 
/* No action required before dequeueing received data. */
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
    size_t target, struct vsock_transport_recv_notify_data *data)
{
    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
0683 
/* No action required after dequeueing received data (credit updates are
 * handled directly in the dequeue paths).
 */
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
    size_t target, ssize_t copied, bool data_read,
    struct vsock_transport_recv_notify_data *data)
{
    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
0691 
/* No per-send setup is needed for virtio transports. */
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
    struct vsock_transport_send_notify_data *data)
{
    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
0698 
/* No action required before a sender blocks. */
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
    struct vsock_transport_send_notify_data *data)
{
    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
0705 
/* No action required before enqueueing data for transmission. */
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
    struct vsock_transport_send_notify_data *data)
{
    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
0712 
/* No action required after enqueueing data for transmission. */
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
    ssize_t written, struct vsock_transport_send_notify_data *data)
{
    return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
0719 
/* Receive high-watermark: the configured per-socket buffer size. */
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
    return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
0725 
/* Virtio stream sockets are always considered active. */
bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
    return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
0731 
/* Stream connections are permitted to/from any CID and port. */
bool virtio_transport_stream_allow(u32 cid, u32 port)
{
    return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
0737 
/* Datagram sockets are not supported by the virtio transport. */
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
                struct sockaddr_vm *addr)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
0744 
/* Datagram sockets are not supported by the virtio transport. */
bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
    return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
0750 
/* Start a connection by sending an OP_REQUEST to the remote address. */
int virtio_transport_connect(struct vsock_sock *vsk)
{
    struct virtio_vsock_pkt_info info = {
        .op = VIRTIO_VSOCK_OP_REQUEST,
        .vsk = vsk,
    };

    return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);
0761 
0762 int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
0763 {
0764     struct virtio_vsock_pkt_info info = {
0765         .op = VIRTIO_VSOCK_OP_SHUTDOWN,
0766         .flags = (mode & RCV_SHUTDOWN ?
0767               VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
0768              (mode & SEND_SHUTDOWN ?
0769               VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
0770         .vsk = vsk,
0771     };
0772 
0773     return virtio_transport_send_pkt_info(vsk, &info);
0774 }
0775 EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
0776 
/* Datagram sockets are not supported by the virtio transport. */
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
                   struct sockaddr_vm *remote_addr,
                   struct msghdr *msg,
                   size_t dgram_len)
{
    return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
0786 
/* Queue up to @len bytes from @msg as an OP_RW packet.  May transmit
 * fewer bytes than requested (credit/packet-size limited); returns the
 * number queued or a negative errno.
 */
ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
                struct msghdr *msg,
                size_t len)
{
    struct virtio_vsock_pkt_info info = {
        .op = VIRTIO_VSOCK_OP_RW,
        .msg = msg,
        .pkt_len = len,
        .vsk = vsk,
    };

    return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
0802 
0803 void virtio_transport_destruct(struct vsock_sock *vsk)
0804 {
0805     struct virtio_vsock_sock *vvs = vsk->trans;
0806 
0807     kfree(vvs);
0808 }
0809 EXPORT_SYMBOL_GPL(virtio_transport_destruct);
0810 
/* Send an OP_RST for this socket, optionally in reply to @pkt.  Replying
 * to a RST with another RST is suppressed to avoid RST ping-pong.
 */
static int virtio_transport_reset(struct vsock_sock *vsk,
                  struct virtio_vsock_pkt *pkt)
{
    struct virtio_vsock_pkt_info info = {
        .op = VIRTIO_VSOCK_OP_RST,
        .reply = !!pkt,
        .vsk = vsk,
    };

    /* Send RST only if the original pkt is not a RST pkt */
    if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
        return 0;

    return virtio_transport_send_pkt_info(vsk, &info);
}
0826 
0827 /* Normally packets are associated with a socket.  There may be no socket if an
0828  * attempt was made to connect to a socket that does not exist.
0829  */
0830 static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
0831                       struct virtio_vsock_pkt *pkt)
0832 {
0833     struct virtio_vsock_pkt *reply;
0834     struct virtio_vsock_pkt_info info = {
0835         .op = VIRTIO_VSOCK_OP_RST,
0836         .type = le16_to_cpu(pkt->hdr.type),
0837         .reply = true,
0838     };
0839 
0840     /* Send RST only if the original pkt is not a RST pkt */
0841     if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
0842         return 0;
0843 
0844     reply = virtio_transport_alloc_pkt(&info, 0,
0845                        le64_to_cpu(pkt->hdr.dst_cid),
0846                        le32_to_cpu(pkt->hdr.dst_port),
0847                        le64_to_cpu(pkt->hdr.src_cid),
0848                        le32_to_cpu(pkt->hdr.src_port));
0849     if (!reply)
0850         return -ENOMEM;
0851 
0852     if (!t) {
0853         virtio_transport_free_pkt(reply);
0854         return -ENOTCONN;
0855     }
0856 
0857     return t->send_pkt(reply);
0858 }
0859 
/* This function should be called with sk_lock held and SOCK_DONE set */
/* Drop all queued receive packets and remove the socket from the vsock
 * bound/connected tables.
 */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
    struct virtio_vsock_sock *vvs = vsk->trans;
    struct virtio_vsock_pkt *pkt, *tmp;

    /* We don't need to take rx_lock, as the socket is closing and we are
     * removing it.
     */
    list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
        list_del(&pkt->list);
        virtio_transport_free_pkt(pkt);
    }

    vsock_remove_sock(vsk);
}
0876 
/* Block (up to @timeout jiffies, i.e. the SO_LINGER time) waiting for
 * the connection to finish shutting down (SOCK_DONE).  Returns early on
 * a pending signal.
 */
static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
    if (timeout) {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(sk_sleep(sk), &wait);

        do {
            if (sk_wait_event(sk, &timeout,
                      sock_flag(sk, SOCK_DONE), &wait))
                break;
        } while (!signal_pending(current) && timeout);

        remove_wait_queue(sk_sleep(sk), &wait);
    }
}
0893 
/* Mark the connection done and tear it down.  If a close timeout was
 * scheduled, finish the removal here (cancelling the pending work when
 * @cancel_timeout is set) and drop the reference the timer held.
 */
static void virtio_transport_do_close(struct vsock_sock *vsk,
                      bool cancel_timeout)
{
    struct sock *sk = sk_vsock(vsk);

    sock_set_flag(sk, SOCK_DONE);
    vsk->peer_shutdown = SHUTDOWN_MASK;
    /* Keep TCP_ESTABLISHED while unread data remains so it can still
     * be received; otherwise move to closing.
     */
    if (vsock_stream_has_data(vsk) <= 0)
        sk->sk_state = TCP_CLOSING;
    sk->sk_state_change(sk);

    if (vsk->close_work_scheduled &&
        (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
        vsk->close_work_scheduled = false;

        virtio_transport_remove_sock(vsk);

        /* Release refcnt obtained when we scheduled the timeout */
        sock_put(sk);
    }
}
0915 
/* Delayed-work handler: the peer did not complete a graceful shutdown
 * within VSOCK_CLOSE_TIMEOUT, so force the connection closed with a RST.
 */
static void virtio_transport_close_timeout(struct work_struct *work)
{
    struct vsock_sock *vsk =
        container_of(work, struct vsock_sock, close_work.work);
    struct sock *sk = sk_vsock(vsk);

    sock_hold(sk);
    lock_sock(sk);

    if (!sock_flag(sk, SOCK_DONE)) {
        (void)virtio_transport_reset(vsk, NULL);

        virtio_transport_do_close(vsk, false);
    }

    vsk->close_work_scheduled = false;

    release_sock(sk);
    sock_put(sk);
}
0936 
/* User context, vsk->sk is locked */
/* Begin closing a connection.  Returns true when the socket can be
 * removed immediately, false when a graceful-shutdown timeout was
 * scheduled and removal is deferred to the timer (which then owns a
 * reference on the sock).
 */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
    struct sock *sk = &vsk->sk;

    if (!(sk->sk_state == TCP_ESTABLISHED ||
          sk->sk_state == TCP_CLOSING))
        return true;

    /* Already received SHUTDOWN from peer, reply with RST */
    if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
        (void)virtio_transport_reset(vsk, NULL);
        return true;
    }

    if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
        (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

    /* Honour SO_LINGER unless the process is exiting. */
    if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
        virtio_transport_wait_close(sk, sk->sk_lingertime);

    if (sock_flag(sk, SOCK_DONE)) {
        return true;
    }

    sock_hold(sk);
    INIT_DELAYED_WORK(&vsk->close_work,
              virtio_transport_close_timeout);
    vsk->close_work_scheduled = true;
    schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
    return false;
}
0969 
/* Release callback: close connection-oriented sockets gracefully and
 * remove the socket unless removal was deferred to the close timeout.
 */
void virtio_transport_release(struct vsock_sock *vsk)
{
    struct sock *sk = &vsk->sk;
    bool remove_sock = true;

    if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
        remove_sock = virtio_transport_close(vsk);

    if (remove_sock) {
        sock_set_flag(sk, SOCK_DONE);
        virtio_transport_remove_sock(vsk);
    }
}
EXPORT_SYMBOL_GPL(virtio_transport_release);
0984 
/* Handle a packet received while the socket is in the connecting state.
 *
 * OP_RESPONSE completes the handshake; OP_RST or any unexpected opcode
 * resets the connection and reports the error to the socket.
 */
static int
virtio_transport_recv_connecting(struct sock *sk,
                 struct virtio_vsock_pkt *pkt)
{
    struct vsock_sock *vsk = vsock_sk(sk);
    int err;
    int skerr;

    switch (le16_to_cpu(pkt->hdr.op)) {
    case VIRTIO_VSOCK_OP_RESPONSE:
        sk->sk_state = TCP_ESTABLISHED;
        sk->sk_socket->state = SS_CONNECTED;
        vsock_insert_connected(vsk);
        sk->sk_state_change(sk);
        break;
    case VIRTIO_VSOCK_OP_INVALID:
        break;
    case VIRTIO_VSOCK_OP_RST:
        skerr = ECONNRESET;
        err = 0;
        goto destroy;
    default:
        skerr = EPROTO;
        err = -EINVAL;
        goto destroy;
    }
    return 0;

destroy:
    virtio_transport_reset(vsk, pkt);
    sk->sk_state = TCP_CLOSE;
    sk->sk_err = skerr;
    sk_error_report(sk);
    return err;
}
1020 
/* Queue a received RW packet on the socket's rx_queue.
 *
 * Takes ownership of @pkt: on any path that does not queue it (credit
 * overrun, or payload merged into the previous packet), the packet is
 * freed before returning.  Small payloads may be copied into the buffer
 * of the last queued packet instead of being queued separately.
 */
static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
                  struct virtio_vsock_pkt *pkt)
{
    struct virtio_vsock_sock *vvs = vsk->trans;
    bool can_enqueue, free_pkt = false;

    pkt->len = le32_to_cpu(pkt->hdr.len);
    pkt->off = 0;

    spin_lock_bh(&vvs->rx_lock);

    /* Account the payload against rx credit; drop the packet if the
     * peer sent more than the buffer space it was granted.
     */
    can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
    if (!can_enqueue) {
        free_pkt = true;
        goto out;
    }

    /* EOM delimits the end of a SEQPACKET message. */
    if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)
        vvs->msg_count++;

    /* Try to copy small packets into the buffer of last packet queued,
     * to avoid wasting memory queueing the entire buffer with a small
     * payload.
     */
    if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
        struct virtio_vsock_pkt *last_pkt;

        last_pkt = list_last_entry(&vvs->rx_queue,
                       struct virtio_vsock_pkt, list);

        /* If there is space in the last packet queued, we copy the
         * new packet in its buffer. We avoid this if the last packet
         * queued has VIRTIO_VSOCK_SEQ_EOM set, because this is
         * delimiter of SEQPACKET message, so 'pkt' is the first packet
         * of a new message.
         */
        if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
            !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)) {
            memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
                   pkt->len);
            last_pkt->len += pkt->len;
            free_pkt = true;
            /* Propagate the flags so message boundaries (EOM/EOR)
             * survive the merge.
             */
            last_pkt->hdr.flags |= pkt->hdr.flags;
            goto out;
        }
    }

    list_add_tail(&pkt->list, &vvs->rx_queue);

out:
    spin_unlock_bh(&vvs->rx_lock);
    if (free_pkt)
        virtio_transport_free_pkt(pkt);
}
1076 
1077 static int
1078 virtio_transport_recv_connected(struct sock *sk,
1079                 struct virtio_vsock_pkt *pkt)
1080 {
1081     struct vsock_sock *vsk = vsock_sk(sk);
1082     int err = 0;
1083 
1084     switch (le16_to_cpu(pkt->hdr.op)) {
1085     case VIRTIO_VSOCK_OP_RW:
1086         virtio_transport_recv_enqueue(vsk, pkt);
1087         sk->sk_data_ready(sk);
1088         return err;
1089     case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
1090         virtio_transport_send_credit_update(vsk);
1091         break;
1092     case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
1093         sk->sk_write_space(sk);
1094         break;
1095     case VIRTIO_VSOCK_OP_SHUTDOWN:
1096         if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
1097             vsk->peer_shutdown |= RCV_SHUTDOWN;
1098         if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
1099             vsk->peer_shutdown |= SEND_SHUTDOWN;
1100         if (vsk->peer_shutdown == SHUTDOWN_MASK &&
1101             vsock_stream_has_data(vsk) <= 0 &&
1102             !sock_flag(sk, SOCK_DONE)) {
1103             (void)virtio_transport_reset(vsk, NULL);
1104 
1105             virtio_transport_do_close(vsk, true);
1106         }
1107         if (le32_to_cpu(pkt->hdr.flags))
1108             sk->sk_state_change(sk);
1109         break;
1110     case VIRTIO_VSOCK_OP_RST:
1111         virtio_transport_do_close(vsk, true);
1112         break;
1113     default:
1114         err = -EINVAL;
1115         break;
1116     }
1117 
1118     virtio_transport_free_pkt(pkt);
1119     return err;
1120 }
1121 
1122 static void
1123 virtio_transport_recv_disconnecting(struct sock *sk,
1124                     struct virtio_vsock_pkt *pkt)
1125 {
1126     struct vsock_sock *vsk = vsock_sk(sk);
1127 
1128     if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
1129         virtio_transport_do_close(vsk, true);
1130 }
1131 
1132 static int
1133 virtio_transport_send_response(struct vsock_sock *vsk,
1134                    struct virtio_vsock_pkt *pkt)
1135 {
1136     struct virtio_vsock_pkt_info info = {
1137         .op = VIRTIO_VSOCK_OP_RESPONSE,
1138         .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
1139         .remote_port = le32_to_cpu(pkt->hdr.src_port),
1140         .reply = true,
1141         .vsk = vsk,
1142     };
1143 
1144     return virtio_transport_send_pkt_info(vsk, &info);
1145 }
1146 
1147 static bool virtio_transport_space_update(struct sock *sk,
1148                       struct virtio_vsock_pkt *pkt)
1149 {
1150     struct vsock_sock *vsk = vsock_sk(sk);
1151     struct virtio_vsock_sock *vvs = vsk->trans;
1152     bool space_available;
1153 
1154     /* Listener sockets are not associated with any transport, so we are
1155      * not able to take the state to see if there is space available in the
1156      * remote peer, but since they are only used to receive requests, we
1157      * can assume that there is always space available in the other peer.
1158      */
1159     if (!vvs)
1160         return true;
1161 
1162     /* buf_alloc and fwd_cnt is always included in the hdr */
1163     spin_lock_bh(&vvs->tx_lock);
1164     vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
1165     vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
1166     space_available = virtio_transport_has_space(vsk);
1167     spin_unlock_bh(&vvs->tx_lock);
1168     return space_available;
1169 }
1170 
1171 /* Handle server socket */
1172 static int
1173 virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
1174                  struct virtio_transport *t)
1175 {
1176     struct vsock_sock *vsk = vsock_sk(sk);
1177     struct vsock_sock *vchild;
1178     struct sock *child;
1179     int ret;
1180 
1181     if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
1182         virtio_transport_reset_no_sock(t, pkt);
1183         return -EINVAL;
1184     }
1185 
1186     if (sk_acceptq_is_full(sk)) {
1187         virtio_transport_reset_no_sock(t, pkt);
1188         return -ENOMEM;
1189     }
1190 
1191     child = vsock_create_connected(sk);
1192     if (!child) {
1193         virtio_transport_reset_no_sock(t, pkt);
1194         return -ENOMEM;
1195     }
1196 
1197     sk_acceptq_added(sk);
1198 
1199     lock_sock_nested(child, SINGLE_DEPTH_NESTING);
1200 
1201     child->sk_state = TCP_ESTABLISHED;
1202 
1203     vchild = vsock_sk(child);
1204     vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
1205             le32_to_cpu(pkt->hdr.dst_port));
1206     vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
1207             le32_to_cpu(pkt->hdr.src_port));
1208 
1209     ret = vsock_assign_transport(vchild, vsk);
1210     /* Transport assigned (looking at remote_addr) must be the same
1211      * where we received the request.
1212      */
1213     if (ret || vchild->transport != &t->transport) {
1214         release_sock(child);
1215         virtio_transport_reset_no_sock(t, pkt);
1216         sock_put(child);
1217         return ret;
1218     }
1219 
1220     if (virtio_transport_space_update(child, pkt))
1221         child->sk_write_space(child);
1222 
1223     vsock_insert_connected(vchild);
1224     vsock_enqueue_accept(sk, child);
1225     virtio_transport_send_response(vchild, pkt);
1226 
1227     release_sock(child);
1228 
1229     sk->sk_data_ready(sk);
1230     return 0;
1231 }
1232 
1233 static bool virtio_transport_valid_type(u16 type)
1234 {
1235     return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
1236            (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
1237 }
1238 
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 *
 * Entry point for every packet received from the peer.  Takes ownership
 * of @pkt: it is either handed off to the rx queue (OP_RW on an
 * established socket, via virtio_transport_recv_connected()) or freed
 * before returning.
 */
void virtio_transport_recv_pkt(struct virtio_transport *t,
                   struct virtio_vsock_pkt *pkt)
{
    struct sockaddr_vm src, dst;
    struct vsock_sock *vsk;
    struct sock *sk;
    bool space_available;

    vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
            le32_to_cpu(pkt->hdr.src_port));
    vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
            le32_to_cpu(pkt->hdr.dst_port));

    trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
                    dst.svm_cid, dst.svm_port,
                    le32_to_cpu(pkt->hdr.len),
                    le16_to_cpu(pkt->hdr.type),
                    le16_to_cpu(pkt->hdr.op),
                    le32_to_cpu(pkt->hdr.flags),
                    le32_to_cpu(pkt->hdr.buf_alloc),
                    le32_to_cpu(pkt->hdr.fwd_cnt));

    /* Only STREAM/SEQPACKET types are valid; reset the sender otherwise. */
    if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
        (void)virtio_transport_reset_no_sock(t, pkt);
        goto free_pkt;
    }

    /* The socket must be in connected or bound table
     * otherwise send reset back
     */
    sk = vsock_find_connected_socket(&src, &dst);
    if (!sk) {
        sk = vsock_find_bound_socket(&dst);
        if (!sk) {
            (void)virtio_transport_reset_no_sock(t, pkt);
            goto free_pkt;
        }
    }

    /* The packet's type must match the socket's type. */
    if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
        (void)virtio_transport_reset_no_sock(t, pkt);
        sock_put(sk);
        goto free_pkt;
    }

    vsk = vsock_sk(sk);

    lock_sock(sk);

    /* Check if sk has been closed before lock_sock */
    if (sock_flag(sk, SOCK_DONE)) {
        (void)virtio_transport_reset_no_sock(t, pkt);
        release_sock(sk);
        sock_put(sk);
        goto free_pkt;
    }

    space_available = virtio_transport_space_update(sk, pkt);

    /* Update CID in case it has changed after a transport reset event */
    if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
        vsk->local_addr.svm_cid = dst.svm_cid;

    if (space_available)
        sk->sk_write_space(sk);

    /* Dispatch on connection state; every branch consumes the packet. */
    switch (sk->sk_state) {
    case TCP_LISTEN:
        virtio_transport_recv_listen(sk, pkt, t);
        virtio_transport_free_pkt(pkt);
        break;
    case TCP_SYN_SENT:
        virtio_transport_recv_connecting(sk, pkt);
        virtio_transport_free_pkt(pkt);
        break;
    case TCP_ESTABLISHED:
        /* recv_connected() queues or frees the packet itself. */
        virtio_transport_recv_connected(sk, pkt);
        break;
    case TCP_CLOSING:
        virtio_transport_recv_disconnecting(sk, pkt);
        virtio_transport_free_pkt(pkt);
        break;
    default:
        (void)virtio_transport_reset_no_sock(t, pkt);
        virtio_transport_free_pkt(pkt);
        break;
    }

    release_sock(sk);

    /* Release refcnt obtained when we fetched this socket out of the
     * bound or connected list.
     */
    sock_put(sk);
    return;

free_pkt:
    virtio_transport_free_pkt(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
1342 
/* Free a packet and its payload buffer.  The buffer must be released
 * first, since it is reached through @pkt.
 */
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
{
    kfree(pkt->buf);
    kfree(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
1349 
1350 MODULE_LICENSE("GPL v2");
1351 MODULE_AUTHOR("Asias He");
1352 MODULE_DESCRIPTION("common code for virtio vsock");