// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
    struct virtio_device *vdev;
    struct virtqueue *vqs[VSOCK_VQ_MAX];

    /* Virtqueue processing is deferred to a workqueue */
    struct work_struct tx_work;
    struct work_struct rx_work;
    struct work_struct event_work;

    /* The following fields are protected by tx_lock.  vqs[VSOCK_VQ_TX]
     * must be accessed with tx_lock held.
     */
    struct mutex tx_lock;
    bool tx_run;

    struct work_struct send_pkt_work;
    spinlock_t send_pkt_list_lock;
    struct list_head send_pkt_list;

    atomic_t queued_replies;

    /* The following fields are protected by rx_lock.  vqs[VSOCK_VQ_RX]
     * must be accessed with rx_lock held.
     */
    struct mutex rx_lock;
    bool rx_run;
    int rx_buf_nr;
    int rx_buf_max_nr;

    /* The following fields are protected by event_lock.
     * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
     */
    struct mutex event_lock;
    bool event_run;
    struct virtio_vsock_event event_list[8];

    u32 guest_cid;
    bool seqpacket_allow;
};

static u32 virtio_transport_get_local_cid(void)
{
    struct virtio_vsock *vsock;
    u32 ret;

    rcu_read_lock();
    vsock = rcu_dereference(the_virtio_vsock);
    if (!vsock) {
        ret = VMADDR_CID_ANY;
        goto out_rcu;
    }

    ret = vsock->guest_cid;
out_rcu:
    rcu_read_unlock();
    return ret;
}
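
/* A return value of VMADDR_CID_ANY signals that no virtio-vsock device is
 * currently bound.  Guest userspace reaches this transport through the
 * AF_VSOCK socket family; a minimal sketch of a guest-to-host connect()
 * (the port number is illustrative, not mandated by this driver):
 *
 *        struct sockaddr_vm addr = {
 *                .svm_family = AF_VSOCK,
 *                .svm_cid    = VMADDR_CID_HOST,
 *                .svm_port   = 1234,
 *        };
 *        int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *
 *        connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */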

static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
    struct virtio_vsock *vsock =
        container_of(work, struct virtio_vsock, send_pkt_work);
    struct virtqueue *vq;
    bool added = false;
    bool restart_rx = false;

    mutex_lock(&vsock->tx_lock);

    if (!vsock->tx_run)
        goto out;

    vq = vsock->vqs[VSOCK_VQ_TX];

    for (;;) {
        struct virtio_vsock_pkt *pkt;
        struct scatterlist hdr, buf, *sgs[2];
        int ret, in_sg = 0, out_sg = 0;
        bool reply;

        spin_lock_bh(&vsock->send_pkt_list_lock);
        if (list_empty(&vsock->send_pkt_list)) {
            spin_unlock_bh(&vsock->send_pkt_list_lock);
            break;
        }

        pkt = list_first_entry(&vsock->send_pkt_list,
                       struct virtio_vsock_pkt, list);
        list_del_init(&pkt->list);
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        virtio_transport_deliver_tap_pkt(pkt);

        reply = pkt->reply;

        sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
        sgs[out_sg++] = &hdr;
        if (pkt->buf) {
            sg_init_one(&buf, pkt->buf, pkt->len);
            sgs[out_sg++] = &buf;
        }

        ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
        /* Usually this means that there is no more space available in
         * the vq
         */
        if (ret < 0) {
            spin_lock_bh(&vsock->send_pkt_list_lock);
            list_add(&pkt->list, &vsock->send_pkt_list);
            spin_unlock_bh(&vsock->send_pkt_list_lock);
            break;
        }

        if (reply) {
            struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
            int val;

            val = atomic_dec_return(&vsock->queued_replies);

            /* Do we now have resources to resume rx processing? */
            if (val + 1 == virtqueue_get_vring_size(rx_vq))
                restart_rx = true;
        }

        added = true;
    }

    if (added)
        virtqueue_kick(vq);

out:
    mutex_unlock(&vsock->tx_lock);

    if (restart_rx)
        queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
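
/* queued_replies counts reply packets (pkt->reply set) sitting either on
 * send_pkt_list or in the TX virtqueue.  rx_work stops pulling buffers
 * once this count reaches the RX vring size, so the check above resumes
 * rx_work exactly when the count drops back below that limit.
 */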

static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
    struct virtio_vsock *vsock;
    int len = pkt->len;

    rcu_read_lock();
    vsock = rcu_dereference(the_virtio_vsock);
    if (!vsock) {
        virtio_transport_free_pkt(pkt);
        len = -ENODEV;
        goto out_rcu;
    }

    if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
        virtio_transport_free_pkt(pkt);
        len = -ENODEV;
        goto out_rcu;
    }

    if (pkt->reply)
        atomic_inc(&vsock->queued_replies);

    spin_lock_bh(&vsock->send_pkt_list_lock);
    list_add_tail(&pkt->list, &vsock->send_pkt_list);
    spin_unlock_bh(&vsock->send_pkt_list_lock);

    queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
    rcu_read_unlock();
    return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
    struct virtio_vsock *vsock;
    struct virtio_vsock_pkt *pkt, *n;
    int cnt = 0, ret;
    LIST_HEAD(freeme);

    rcu_read_lock();
    vsock = rcu_dereference(the_virtio_vsock);
    if (!vsock) {
        ret = -ENODEV;
        goto out_rcu;
    }

    spin_lock_bh(&vsock->send_pkt_list_lock);
    list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
        if (pkt->vsk != vsk)
            continue;
        list_move(&pkt->list, &freeme);
    }
    spin_unlock_bh(&vsock->send_pkt_list_lock);

    list_for_each_entry_safe(pkt, n, &freeme, list) {
        if (pkt->reply)
            cnt++;
        list_del(&pkt->list);
        virtio_transport_free_pkt(pkt);
    }

    if (cnt) {
        struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
        int new_cnt;

        new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
        if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
            new_cnt < virtqueue_get_vring_size(rx_vq))
            queue_work(virtio_vsock_workqueue, &vsock->rx_work);
    }

    ret = 0;

out_rcu:
    rcu_read_unlock();
    return ret;
}
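
/* The two-part condition above detects the full -> not-full transition:
 * the old count (new_cnt + cnt) had reached the RX vring size, which
 * stalls rx_work, while the new count (new_cnt) is below it, so rx
 * processing needs an explicit restart.
 */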

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
    int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
    struct virtio_vsock_pkt *pkt;
    struct scatterlist hdr, buf, *sgs[2];
    struct virtqueue *vq;
    int ret;

    vq = vsock->vqs[VSOCK_VQ_RX];

    do {
        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
            break;

        pkt->buf = kmalloc(buf_len, GFP_KERNEL);
        if (!pkt->buf) {
            virtio_transport_free_pkt(pkt);
            break;
        }

        pkt->buf_len = buf_len;
        pkt->len = buf_len;

        sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
        sgs[0] = &hdr;

        sg_init_one(&buf, pkt->buf, buf_len);
        sgs[1] = &buf;
        ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
        if (ret) {
            virtio_transport_free_pkt(pkt);
            break;
        }
        vsock->rx_buf_nr++;
    } while (vq->num_free);
    if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
        vsock->rx_buf_max_nr = vsock->rx_buf_nr;
    virtqueue_kick(vq);
}
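
/* Both sg entries are device-writable here (0 out, 2 in): the device
 * fills in the packet header and payload.  rx_buf_max_nr records the
 * high-water mark of posted buffers; rx_work compares against it to
 * decide when to refill.
 */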

static void virtio_transport_tx_work(struct work_struct *work)
{
    struct virtio_vsock *vsock =
        container_of(work, struct virtio_vsock, tx_work);
    struct virtqueue *vq;
    bool added = false;

    vq = vsock->vqs[VSOCK_VQ_TX];
    mutex_lock(&vsock->tx_lock);

    if (!vsock->tx_run)
        goto out;

    do {
        struct virtio_vsock_pkt *pkt;
        unsigned int len;

        virtqueue_disable_cb(vq);
        while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
            virtio_transport_free_pkt(pkt);
            added = true;
        }
    } while (!virtqueue_enable_cb(vq));

out:
    mutex_unlock(&vsock->tx_lock);

    if (added)
        queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}
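
/* The disable_cb/enable_cb loop is the usual virtio idiom for avoiding a
 * missed-interrupt race: if virtqueue_enable_cb() reports that buffers
 * arrived while callbacks were off, the queue is drained once more.
 * Reclaimed TX slots may unblock queued packets, hence send_pkt_work is
 * rescheduled whenever anything was freed.
 */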

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
    struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
    int val;

    smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
    val = atomic_read(&vsock->queued_replies);

    return val < virtqueue_get_vring_size(vq);
}
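
/* A received control packet may require a reply (a reset, for example)
 * that is queued regardless of peer flow control.  Capping
 * queued_replies at the RX vring size bounds that memory and, via the
 * rx_work stall, applies back-pressure to the device.
 */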

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
                       struct virtio_vsock_event *event)
{
    struct scatterlist sg;
    struct virtqueue *vq;

    vq = vsock->vqs[VSOCK_VQ_EVENT];

    sg_init_one(&sg, event, sizeof(*event));

    return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
    size_t i;

    for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
        struct virtio_vsock_event *event = &vsock->event_list[i];

        virtio_vsock_event_fill_one(vsock, event);
    }

    virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
    /* vmci_transport.c doesn't take sk_lock here either.  At least we're
     * under vsock_table_lock so the sock cannot disappear while we're
     * executing.
     */

    sk->sk_state = TCP_CLOSE;
    sk->sk_err = ECONNRESET;
    sk_error_report(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
    struct virtio_device *vdev = vsock->vdev;
    __le64 guest_cid;

    vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
              &guest_cid, sizeof(guest_cid));
    vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
                      struct virtio_vsock_event *event)
{
    switch (le32_to_cpu(event->id)) {
    case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
        virtio_vsock_update_guest_cid(vsock);
        vsock_for_each_connected_socket(&virtio_transport.transport,
                        virtio_vsock_reset_sock);
        break;
    }
}

static void virtio_transport_event_work(struct work_struct *work)
{
    struct virtio_vsock *vsock =
        container_of(work, struct virtio_vsock, event_work);
    struct virtqueue *vq;

    vq = vsock->vqs[VSOCK_VQ_EVENT];

    mutex_lock(&vsock->event_lock);

    if (!vsock->event_run)
        goto out;

    do {
        struct virtio_vsock_event *event;
        unsigned int len;

        virtqueue_disable_cb(vq);
        while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
            if (len == sizeof(*event))
                virtio_vsock_event_handle(vsock, event);

            virtio_vsock_event_fill_one(vsock, event);
        }
    } while (!virtqueue_enable_cb(vq));

    virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
    mutex_unlock(&vsock->event_lock);
}
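
/* A TRANSPORT_RESET event typically follows live migration: the guest
 * may resume with a different CID, so the CID is re-read and every
 * connected socket is reset.  Event buffers come from the fixed
 * event_list[] array and are handed straight back to the device after
 * each event is consumed.
 */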

static void virtio_vsock_event_done(struct virtqueue *vq)
{
    struct virtio_vsock *vsock = vq->vdev->priv;

    if (!vsock)
        return;
    queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
    struct virtio_vsock *vsock = vq->vdev->priv;

    if (!vsock)
        return;
    queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
    struct virtio_vsock *vsock = vq->vdev->priv;

    if (!vsock)
        return;
    queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
    .transport = {
        .module                   = THIS_MODULE,

        .get_local_cid            = virtio_transport_get_local_cid,

        .init                     = virtio_transport_do_socket_init,
        .destruct                 = virtio_transport_destruct,
        .release                  = virtio_transport_release,
        .connect                  = virtio_transport_connect,
        .shutdown                 = virtio_transport_shutdown,
        .cancel_pkt               = virtio_transport_cancel_pkt,

        .dgram_bind               = virtio_transport_dgram_bind,
        .dgram_dequeue            = virtio_transport_dgram_dequeue,
        .dgram_enqueue            = virtio_transport_dgram_enqueue,
        .dgram_allow              = virtio_transport_dgram_allow,

        .stream_dequeue           = virtio_transport_stream_dequeue,
        .stream_enqueue           = virtio_transport_stream_enqueue,
        .stream_has_data          = virtio_transport_stream_has_data,
        .stream_has_space         = virtio_transport_stream_has_space,
        .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
        .stream_is_active         = virtio_transport_stream_is_active,
        .stream_allow             = virtio_transport_stream_allow,

        .seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
        .seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
        .seqpacket_allow          = virtio_transport_seqpacket_allow,
        .seqpacket_has_data       = virtio_transport_seqpacket_has_data,

        .notify_poll_in           = virtio_transport_notify_poll_in,
        .notify_poll_out          = virtio_transport_notify_poll_out,
        .notify_recv_init         = virtio_transport_notify_recv_init,
        .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
        .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
        .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
        .notify_send_init         = virtio_transport_notify_send_init,
        .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
        .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
        .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
        .notify_buffer_size       = virtio_transport_notify_buffer_size,
    },

    .send_pkt = virtio_transport_send_pkt,
};
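
/* Apart from the handful of callbacks defined in this file, the
 * virtio_transport_* implementations above live in the code shared with
 * the vhost transport, net/vmw_vsock/virtio_transport_common.c.
 */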

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
    struct virtio_vsock *vsock;
    bool seqpacket_allow;

    seqpacket_allow = false;
    rcu_read_lock();
    vsock = rcu_dereference(the_virtio_vsock);
    if (vsock)
        seqpacket_allow = vsock->seqpacket_allow;
    rcu_read_unlock();

    return seqpacket_allow;
}

static void virtio_transport_rx_work(struct work_struct *work)
{
    struct virtio_vsock *vsock =
        container_of(work, struct virtio_vsock, rx_work);
    struct virtqueue *vq;

    vq = vsock->vqs[VSOCK_VQ_RX];

    mutex_lock(&vsock->rx_lock);

    if (!vsock->rx_run)
        goto out;

    do {
        virtqueue_disable_cb(vq);
        for (;;) {
            struct virtio_vsock_pkt *pkt;
            unsigned int len;

            if (!virtio_transport_more_replies(vsock)) {
                /* Stop rx until the device processes already
                 * pending replies.  Leave rx virtqueue
                 * callbacks disabled.
                 */
                goto out;
            }

            pkt = virtqueue_get_buf(vq, &len);
            if (!pkt)
                break;

            vsock->rx_buf_nr--;

            /* Drop short/long packets */
            if (unlikely(len < sizeof(pkt->hdr) ||
                     len > sizeof(pkt->hdr) + pkt->len)) {
                virtio_transport_free_pkt(pkt);
                continue;
            }

            pkt->len = len - sizeof(pkt->hdr);
            virtio_transport_deliver_tap_pkt(pkt);
            virtio_transport_recv_pkt(&virtio_transport, pkt);
        }
    } while (!virtqueue_enable_cb(vq));

out:
    if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
        virtio_vsock_rx_fill(vsock);
    mutex_unlock(&vsock->rx_lock);
}
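
/* The queue is refilled only once fewer than half of the high-water mark
 * of buffers remain posted, which batches allocations instead of
 * replacing every consumed buffer one by one.
 */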

static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
    struct virtio_device *vdev = vsock->vdev;
    static const char * const names[] = {
        "rx",
        "tx",
        "event",
    };
    vq_callback_t *callbacks[] = {
        virtio_vsock_rx_done,
        virtio_vsock_tx_done,
        virtio_vsock_event_done,
    };
    int ret;

    ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
              NULL);
    if (ret < 0)
        return ret;

    virtio_vsock_update_guest_cid(vsock);

    virtio_device_ready(vdev);

    mutex_lock(&vsock->tx_lock);
    vsock->tx_run = true;
    mutex_unlock(&vsock->tx_lock);

    mutex_lock(&vsock->rx_lock);
    virtio_vsock_rx_fill(vsock);
    vsock->rx_run = true;
    mutex_unlock(&vsock->rx_lock);

    mutex_lock(&vsock->event_lock);
    virtio_vsock_event_fill(vsock);
    vsock->event_run = true;
    mutex_unlock(&vsock->event_lock);

    return 0;
}
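
/* virtio_device_ready() sets DRIVER_OK before any buffers are posted;
 * the per-queue *_run flags are set afterwards, under the matching
 * locks, so a callback firing during bring-up finds its work handler
 * still disabled rather than touching a half-initialized queue.
 */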

static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
    struct virtio_device *vdev = vsock->vdev;
    struct virtio_vsock_pkt *pkt;

    /* Reset all connected sockets when the VQs disappear */
    vsock_for_each_connected_socket(&virtio_transport.transport,
                    virtio_vsock_reset_sock);

    /* Stop all work handlers to make sure no one is accessing the device,
     * so we can safely call virtio_reset_device().
     */
    mutex_lock(&vsock->rx_lock);
    vsock->rx_run = false;
    mutex_unlock(&vsock->rx_lock);

    mutex_lock(&vsock->tx_lock);
    vsock->tx_run = false;
    mutex_unlock(&vsock->tx_lock);

    mutex_lock(&vsock->event_lock);
    vsock->event_run = false;
    mutex_unlock(&vsock->event_lock);

    /* Flush all device writes and interrupts, device will not use any
     * more buffers.
     */
    virtio_reset_device(vdev);

    mutex_lock(&vsock->rx_lock);
    while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
        virtio_transport_free_pkt(pkt);
    mutex_unlock(&vsock->rx_lock);

    mutex_lock(&vsock->tx_lock);
    while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
        virtio_transport_free_pkt(pkt);
    mutex_unlock(&vsock->tx_lock);

    spin_lock_bh(&vsock->send_pkt_list_lock);
    while (!list_empty(&vsock->send_pkt_list)) {
        pkt = list_first_entry(&vsock->send_pkt_list,
                       struct virtio_vsock_pkt, list);
        list_del(&pkt->list);
        virtio_transport_free_pkt(pkt);
    }
    spin_unlock_bh(&vsock->send_pkt_list_lock);

    /* Delete virtqueues and flush outstanding callbacks if any */
    vdev->config->del_vqs(vdev);
}
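
/* Teardown order: clear the *_run flags under their locks (so running
 * workers drain out), reset the device, then detach the buffers the
 * device never consumed and drop anything still waiting on
 * send_pkt_list.
 */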

static int virtio_vsock_probe(struct virtio_device *vdev)
{
    struct virtio_vsock *vsock = NULL;
    int ret;

    ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
    if (ret)
        return ret;

    /* Only one virtio-vsock device per guest is supported */
    if (rcu_dereference_protected(the_virtio_vsock,
                lockdep_is_held(&the_virtio_vsock_mutex))) {
        ret = -EBUSY;
        goto out;
    }

    vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
    if (!vsock) {
        ret = -ENOMEM;
        goto out;
    }

    vsock->vdev = vdev;

    vsock->rx_buf_nr = 0;
    vsock->rx_buf_max_nr = 0;
    atomic_set(&vsock->queued_replies, 0);

    mutex_init(&vsock->tx_lock);
    mutex_init(&vsock->rx_lock);
    mutex_init(&vsock->event_lock);
    spin_lock_init(&vsock->send_pkt_list_lock);
    INIT_LIST_HEAD(&vsock->send_pkt_list);
    INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
    INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
    INIT_WORK(&vsock->event_work, virtio_transport_event_work);
    INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

    if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
        vsock->seqpacket_allow = true;

    vdev->priv = vsock;

    ret = virtio_vsock_vqs_init(vsock);
    if (ret < 0)
        goto out;

    rcu_assign_pointer(the_virtio_vsock, vsock);

    mutex_unlock(&the_virtio_vsock_mutex);

    return 0;

out:
    kfree(vsock);
    mutex_unlock(&the_virtio_vsock_mutex);
    return ret;
}
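
/* rcu_assign_pointer() is the publish step: only after every field and
 * virtqueue is initialized does the device become visible to the RCU
 * readers in the hot paths above.
 */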

static void virtio_vsock_remove(struct virtio_device *vdev)
{
    struct virtio_vsock *vsock = vdev->priv;

    mutex_lock(&the_virtio_vsock_mutex);

    vdev->priv = NULL;
    rcu_assign_pointer(the_virtio_vsock, NULL);
    synchronize_rcu();

    virtio_vsock_vqs_del(vsock);

    /* Other works can be queued before 'config->del_vqs()', so we flush
     * all works before freeing the vsock object to avoid a use after
     * free.
     */
    flush_work(&vsock->rx_work);
    flush_work(&vsock->tx_work);
    flush_work(&vsock->event_work);
    flush_work(&vsock->send_pkt_work);

    mutex_unlock(&the_virtio_vsock_mutex);

    kfree(vsock);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
    struct virtio_vsock *vsock = vdev->priv;

    mutex_lock(&the_virtio_vsock_mutex);

    rcu_assign_pointer(the_virtio_vsock, NULL);
    synchronize_rcu();

    virtio_vsock_vqs_del(vsock);

    mutex_unlock(&the_virtio_vsock_mutex);

    return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
    struct virtio_vsock *vsock = vdev->priv;
    int ret;

    mutex_lock(&the_virtio_vsock_mutex);

    /* Only one virtio-vsock device per guest is supported */
    if (rcu_dereference_protected(the_virtio_vsock,
                lockdep_is_held(&the_virtio_vsock_mutex))) {
        ret = -EBUSY;
        goto out;
    }

    ret = virtio_vsock_vqs_init(vsock);
    if (ret < 0)
        goto out;

    rcu_assign_pointer(the_virtio_vsock, vsock);

out:
    mutex_unlock(&the_virtio_vsock_mutex);
    return ret;
}
#endif /* CONFIG_PM_SLEEP */
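
/* freeze/restore mirror remove/probe but keep the vsock object itself
 * alive across suspend: only the virtqueues are torn down and rebuilt,
 * and the RCU pointer is unpublished while the device is quiesced.
 */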

static struct virtio_device_id id_table[] = {
    { VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
    { 0 },
};

static unsigned int features[] = {
    VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
    .feature_table = features,
    .feature_table_size = ARRAY_SIZE(features),
    .driver.name = KBUILD_MODNAME,
    .driver.owner = THIS_MODULE,
    .id_table = id_table,
    .probe = virtio_vsock_probe,
    .remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
    .freeze = virtio_vsock_freeze,
    .restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
    int ret;

    virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
    if (!virtio_vsock_workqueue)
        return -ENOMEM;

    ret = vsock_core_register(&virtio_transport.transport,
                  VSOCK_TRANSPORT_F_G2H);
    if (ret)
        goto out_wq;

    ret = register_virtio_driver(&virtio_vsock_driver);
    if (ret)
        goto out_vci;

    return 0;

out_vci:
    vsock_core_unregister(&virtio_transport.transport);
out_wq:
    destroy_workqueue(virtio_vsock_workqueue);
    return ret;
}

static void __exit virtio_vsock_exit(void)
{
    unregister_virtio_driver(&virtio_vsock_driver);
    vsock_core_unregister(&virtio_transport.transport);
    destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);