0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Hyper-V transport for vsock
0004  *
0005  * Hyper-V Sockets supplies a byte-stream based communication mechanism
0006  * between the host and the VM. This driver implements the necessary
0007  * support in the VM by introducing the new vsock transport.
0008  *
0009  * Copyright (c) 2017, Microsoft Corporation.
0010  */
0011 #include <linux/module.h>
0012 #include <linux/vmalloc.h>
0013 #include <linux/hyperv.h>
0014 #include <net/sock.h>
0015 #include <net/af_vsock.h>
0016 #include <asm/hyperv-tlfs.h>
0017 
0018 /* Older Windows hosts (VMBUS version 'VERSION_WIN10' or before) have a
0019  * stricter requirement on the hv_sock ring buffer size: six 4K pages.
0020  * hyperv-tlfs defines HV_HYP_PAGE_SIZE as 4K. Newer hosts don't have this
0021  * limitation, but keep the defaults the same for compatibility.
0022  */
0023 #define RINGBUFFER_HVS_RCV_SIZE (HV_HYP_PAGE_SIZE * 6)
0024 #define RINGBUFFER_HVS_SND_SIZE (HV_HYP_PAGE_SIZE * 6)
0025 #define RINGBUFFER_HVS_MAX_SIZE (HV_HYP_PAGE_SIZE * 64)
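/* Worked sizes (illustrative, assuming HV_HYP_PAGE_SIZE == 4096 as noted
 * above): the default send and recv rings are 6 * 4K = 24 KiB each, and the
 * negotiable maximum is 64 * 4K = 256 KiB.
 */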
0026 
0027 /* The MTU is 16KB per the host side's design */
0028 #define HVS_MTU_SIZE        (1024 * 16)
0029 
0030 /* How long to wait for graceful shutdown of a connection */
0031 #define HVS_CLOSE_TIMEOUT (8 * HZ)
0032 
0033 struct vmpipe_proto_header {
0034     u32 pkt_type;
0035     u32 data_size;
0036 };
0037 
0038 /* For recv, we use the VMBus in-place packet iterator APIs to directly copy
0039  * data from the ringbuffer into the userspace buffer.
0040  */
0041 struct hvs_recv_buf {
0042     /* The header before the payload data */
0043     struct vmpipe_proto_header hdr;
0044 
0045     /* The payload */
0046     u8 data[HVS_MTU_SIZE];
0047 };
0048 
0049 /* We can send up to HVS_MTU_SIZE bytes of payload to the host, but let's use
0050  * a smaller size, i.e. HVS_SEND_BUF_SIZE, to maximize concurrency between
0051  * guest and host processing, since one VMBUS packet is the smallest
0052  * processing unit.
0053  *
0054  * Note: the buffer can be eliminated in the future when we add new VMBus
0055  * ringbuffer APIs that allow us to directly copy data from userspace buffer
0056  * to VMBus ringbuffer.
0057  */
0058 #define HVS_SEND_BUF_SIZE \
0059         (HV_HYP_PAGE_SIZE - sizeof(struct vmpipe_proto_header))
0060 
0061 struct hvs_send_buf {
0062     /* The header before the payload data */
0063     struct vmpipe_proto_header hdr;
0064 
0065     /* The payload */
0066     u8 data[HVS_SEND_BUF_SIZE];
0067 };
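/* Worked example, assuming HV_HYP_PAGE_SIZE == 4096: the 8-byte
 * vmpipe_proto_header plus HVS_SEND_BUF_SIZE = 4096 - 8 = 4088 bytes of
 * payload make struct hvs_send_buf exactly one page, which
 * hvs_stream_enqueue() checks below with BUILD_BUG_ON().
 */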
0068 
0069 #define HVS_HEADER_LEN  (sizeof(struct vmpacket_descriptor) + \
0070              sizeof(struct vmpipe_proto_header))
0071 
0072 /* See 'prev_indices' in hv_ringbuffer_read(), hv_ringbuffer_write(), and
0073  * __hv_pkt_iter_next().
0074  */
0075 #define VMBUS_PKT_TRAILER_SIZE  (sizeof(u64))
0076 
0077 #define HVS_PKT_LEN(payload_len)    (HVS_HEADER_LEN + \
0078                      ALIGN((payload_len), 8) + \
0079                      VMBUS_PKT_TRAILER_SIZE)
0080 
0081 /* Upper bound on the size of a VMbus packet for hv_sock */
0082 #define HVS_MAX_PKT_SIZE    HVS_PKT_LEN(HVS_MTU_SIZE)
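/* Worked example, assuming a 16-byte struct vmpacket_descriptor and the
 * 8-byte vmpipe_proto_header above: HVS_HEADER_LEN = 24, so
 * HVS_PKT_LEN(0) = 24 + 0 + 8 = 32 bytes (a bare FIN packet) and
 * HVS_MAX_PKT_SIZE = 24 + 16384 + 8 = 16416 bytes.
 */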
0083 
0084 union hvs_service_id {
0085     guid_t  srv_id;
0086 
0087     struct {
0088         unsigned int svm_port;
0089         unsigned char b[sizeof(guid_t) - sizeof(unsigned int)];
0090     };
0091 };
0092 
0093 /* Per-socket state (accessed via vsk->trans) */
0094 struct hvsock {
0095     struct vsock_sock *vsk;
0096 
0097     guid_t vm_srv_id;
0098     guid_t host_srv_id;
0099 
0100     struct vmbus_channel *chan;
0101     struct vmpacket_descriptor *recv_desc;
0102 
0103     /* The length of the payload not delivered to userland yet */
0104     u32 recv_data_len;
0105     /* The offset of the payload */
0106     u32 recv_data_off;
0107 
0108     /* Have we sent the zero-length packet (FIN)? */
0109     bool fin_sent;
0110 };
0111 
0112 /* In the VM, we support Hyper-V Sockets with AF_VSOCK, and the endpoint is
0113  * <cid, port> (see struct sockaddr_vm). Note: cid is not really used here:
0114  * when we write apps to connect to the host, we can only use VMADDR_CID_ANY
0115  * or VMADDR_CID_HOST (both are equivalent) as the remote cid, and when we
0116  * write apps to bind() & listen() in the VM, we can only use VMADDR_CID_ANY
0117  * as the local cid.
0118  *
0119  * On the host, Hyper-V Sockets are supported by Winsock AF_HYPERV:
0120  * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-
0121  * guide/make-integration-service, and the endpoint is <VmID, ServiceId> with
0122  * the below sockaddr:
0123  *
0124  * struct SOCKADDR_HV
0125  * {
0126  *    ADDRESS_FAMILY Family;
0127  *    USHORT Reserved;
0128  *    GUID VmId;
0129  *    GUID ServiceId;
0130  * };
0131  * Note: the VmID is not used by the Linux VM, and it isn't actually
0132  * transmitted via VMBus, because the host and the VM can easily identify
0133  * each other. Though the VmID is useful on the host, especially in the case
0134  * of Windows containers, the Linux VM doesn't need it at all.
0135  *
0136  * To make use of the AF_VSOCK infrastructure in the Linux VM, we have to
0137  * limit the available GUID space of SOCKADDR_HV so that we can create a
0138  * mapping between the AF_VSOCK port and the SOCKADDR_HV Service GUID. The
0139  * rule for writing Hyper-V Sockets apps on the host and in the Linux VM is:
0140  *
0141  ****************************************************************************
0142  * The only valid Service GUIDs, from the perspectives of both the host and *
0143  * Linux VM, that can be connected by the other end, must conform to this   *
0144  * format: <port>-facb-11e6-bd58-64006a7986d3.                              *
0145  ****************************************************************************
0146  *
0147  * When we write apps on the host to connect(), the GUID ServiceID is used.
0148  * When we write apps in the Linux VM to connect(), we only need to specify
0149  * the port; the driver will form the GUID and use it to request the host.
0150  *
0151  */
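/* A minimal usage sketch (illustrative only; the port value 0x1234 and the
 * snippet below are assumptions, not part of this driver): an AF_VSOCK port
 * maps to the Service GUID formed by replacing the first 32 bits of the
 * template below, e.g. port 0x1234 <-> 00001234-facb-11e6-bd58-64006a7986d3.
 * A guest application then only deals with the port:
 *
 *   #include <sys/socket.h>
 *   #include <linux/vm_sockets.h>
 *
 *   int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *   struct sockaddr_vm sa = {
 *           .svm_family = AF_VSOCK,
 *           .svm_cid    = VMADDR_CID_HOST,  // or VMADDR_CID_ANY; equivalent
 *           .svm_port   = 0x1234,           // host listens on 00001234-facb-...
 *   };
 *   connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */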
0152 
0153 /* 00000000-facb-11e6-bd58-64006a7986d3 */
0154 static const guid_t srv_id_template =
0155     GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
0156           0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3);
0157 
0158 static bool hvs_check_transport(struct vsock_sock *vsk);
0159 
0160 static bool is_valid_srv_id(const guid_t *id)
0161 {
0162     return !memcmp(&id->b[4], &srv_id_template.b[4], sizeof(guid_t) - 4);
0163 }
0164 
0165 static unsigned int get_port_by_srv_id(const guid_t *svr_id)
0166 {
0167     return *((unsigned int *)svr_id);
0168 }
0169 
0170 static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id)
0171 {
0172     unsigned int port = get_port_by_srv_id(svr_id);
0173 
0174     vsock_addr_init(addr, VMADDR_CID_ANY, port);
0175 }
0176 
0177 static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
0178 {
0179     set_channel_pending_send_size(chan,
0180                       HVS_PKT_LEN(HVS_SEND_BUF_SIZE));
0181 
0182     virt_mb();
0183 }
0184 
0185 static bool hvs_channel_readable(struct vmbus_channel *chan)
0186 {
0187     u32 readable = hv_get_bytes_to_read(&chan->inbound);
0188 
0189     /* 0-size payload means FIN */
0190     return readable >= HVS_PKT_LEN(0);
0191 }
0192 
0193 static int hvs_channel_readable_payload(struct vmbus_channel *chan)
0194 {
0195     u32 readable = hv_get_bytes_to_read(&chan->inbound);
0196 
0197     if (readable > HVS_PKT_LEN(0)) {
0198         /* We have at least 1 byte to read. We don't need to return
0199          * the exact readable bytes: see vsock_stream_recvmsg() ->
0200          * vsock_stream_has_data().
0201          */
0202         return 1;
0203     }
0204 
0205     if (readable == HVS_PKT_LEN(0)) {
0206         /* 0-size payload means FIN */
0207         return 0;
0208     }
0209 
0210     /* No payload or FIN */
0211     return -1;
0212 }
0213 
0214 static size_t hvs_channel_writable_bytes(struct vmbus_channel *chan)
0215 {
0216     u32 writeable = hv_get_bytes_to_write(&chan->outbound);
0217     size_t ret;
0218 
0219     /* The ringbuffer mustn't be 100% full, and we should reserve a
0220      * zero-length-payload packet for the FIN: see hv_ringbuffer_write()
0221      * and hvs_shutdown().
0222      */
0223     if (writeable <= HVS_PKT_LEN(1) + HVS_PKT_LEN(0))
0224         return 0;
0225 
0226     ret = writeable - HVS_PKT_LEN(1) - HVS_PKT_LEN(0);
0227 
0228     return round_down(ret, 8);
0229 }
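/* For reference (using the worked numbers above, assuming a 16-byte packet
 * descriptor): HVS_PKT_LEN(1) + HVS_PKT_LEN(0) = 40 + 32 = 72, so at least
 * 73 free bytes are needed before any writable space is reported.
 */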
0230 
0231 static int __hvs_send_data(struct vmbus_channel *chan,
0232                struct vmpipe_proto_header *hdr,
0233                size_t to_write)
0234 {
0235     hdr->pkt_type = 1;
0236     hdr->data_size = to_write;
0237     return vmbus_sendpacket(chan, hdr, sizeof(*hdr) + to_write,
0238                 0, VM_PKT_DATA_INBAND, 0);
0239 }
0240 
0241 static int hvs_send_data(struct vmbus_channel *chan,
0242              struct hvs_send_buf *send_buf, size_t to_write)
0243 {
0244     return __hvs_send_data(chan, &send_buf->hdr, to_write);
0245 }
0246 
0247 static void hvs_channel_cb(void *ctx)
0248 {
0249     struct sock *sk = (struct sock *)ctx;
0250     struct vsock_sock *vsk = vsock_sk(sk);
0251     struct hvsock *hvs = vsk->trans;
0252     struct vmbus_channel *chan = hvs->chan;
0253 
0254     if (hvs_channel_readable(chan))
0255         sk->sk_data_ready(sk);
0256 
0257     if (hv_get_bytes_to_write(&chan->outbound) > 0)
0258         sk->sk_write_space(sk);
0259 }
0260 
0261 static void hvs_do_close_lock_held(struct vsock_sock *vsk,
0262                    bool cancel_timeout)
0263 {
0264     struct sock *sk = sk_vsock(vsk);
0265 
0266     sock_set_flag(sk, SOCK_DONE);
0267     vsk->peer_shutdown = SHUTDOWN_MASK;
0268     if (vsock_stream_has_data(vsk) <= 0)
0269         sk->sk_state = TCP_CLOSING;
0270     sk->sk_state_change(sk);
0271     if (vsk->close_work_scheduled &&
0272         (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
0273         vsk->close_work_scheduled = false;
0274         vsock_remove_sock(vsk);
0275 
0276         /* Release the reference taken while scheduling the timeout */
0277         sock_put(sk);
0278     }
0279 }
0280 
0281 static void hvs_close_connection(struct vmbus_channel *chan)
0282 {
0283     struct sock *sk = get_per_channel_state(chan);
0284 
0285     lock_sock(sk);
0286     hvs_do_close_lock_held(vsock_sk(sk), true);
0287     release_sock(sk);
0288 
0289     /* Release the refcnt for the channel that's opened in
0290      * hvs_open_connection().
0291      */
0292     sock_put(sk);
0293 }
0294 
0295 static void hvs_open_connection(struct vmbus_channel *chan)
0296 {
0297     guid_t *if_instance, *if_type;
0298     unsigned char conn_from_host;
0299 
0300     struct sockaddr_vm addr;
0301     struct sock *sk, *new = NULL;
0302     struct vsock_sock *vnew = NULL;
0303     struct hvsock *hvs = NULL;
0304     struct hvsock *hvs_new = NULL;
0305     int rcvbuf;
0306     int ret;
0307     int sndbuf;
0308 
0309     if_type = &chan->offermsg.offer.if_type;
0310     if_instance = &chan->offermsg.offer.if_instance;
0311     conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
0312     if (!is_valid_srv_id(if_type))
0313         return;
0314 
0315     hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
0316     sk = vsock_find_bound_socket(&addr);
0317     if (!sk)
0318         return;
0319 
0320     lock_sock(sk);
0321     if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
0322         (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
0323         goto out;
0324 
0325     if (conn_from_host) {
0326         if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)
0327             goto out;
0328 
0329         new = vsock_create_connected(sk);
0330         if (!new)
0331             goto out;
0332 
0333         new->sk_state = TCP_SYN_SENT;
0334         vnew = vsock_sk(new);
0335 
0336         hvs_addr_init(&vnew->local_addr, if_type);
0337 
0338         /* Remote peer is always the host */
0339         vsock_addr_init(&vnew->remote_addr,
0340                 VMADDR_CID_HOST, VMADDR_PORT_ANY);
0341         vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
0342         ret = vsock_assign_transport(vnew, vsock_sk(sk));
0343         /* The transport assigned (based on remote_addr) must be the
0344          * same one on which we received the request.
0345          */
0346         if (ret || !hvs_check_transport(vnew)) {
0347             sock_put(new);
0348             goto out;
0349         }
0350         hvs_new = vnew->trans;
0351         hvs_new->chan = chan;
0352     } else {
0353         hvs = vsock_sk(sk)->trans;
0354         hvs->chan = chan;
0355     }
0356 
0357     set_channel_read_mode(chan, HV_CALL_DIRECT);
0358 
0359     /* Use the socket buffer sizes as hints for the VMBUS ring size. For
0360      * server side sockets, 'sk' is the parent socket and thus, this will
0361      * allow the child sockets to inherit the size from the parent. Keep
0362      * the mins to the default value and align to page size as per VMBUS
0363      * requirements.
0364      * For the max, the socket core library will limit the socket buffer
0365      * size that can be set by the user, but since the hv_sock VMBUS ring
0366      * buffer is currently a physically contiguous allocation, restrict it
0367      * further.
0368      * Older versions of the host-side hv_sock code cannot handle a bigger
0369      * VMBUS ring buffer size. Use the protocol version number to limit the
0370      * larger sizes to newer hosts.
0371      */
0372     if (vmbus_proto_version < VERSION_WIN10_V5) {
0373         sndbuf = RINGBUFFER_HVS_SND_SIZE;
0374         rcvbuf = RINGBUFFER_HVS_RCV_SIZE;
0375     } else {
0376         sndbuf = max_t(int, sk->sk_sndbuf, RINGBUFFER_HVS_SND_SIZE);
0377         sndbuf = min_t(int, sndbuf, RINGBUFFER_HVS_MAX_SIZE);
0378         sndbuf = ALIGN(sndbuf, HV_HYP_PAGE_SIZE);
0379         rcvbuf = max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE);
0380         rcvbuf = min_t(int, rcvbuf, RINGBUFFER_HVS_MAX_SIZE);
0381         rcvbuf = ALIGN(rcvbuf, HV_HYP_PAGE_SIZE);
0382     }
0383 
0384     chan->max_pkt_size = HVS_MAX_PKT_SIZE;
0385 
0386     ret = vmbus_open(chan, sndbuf, rcvbuf, NULL, 0, hvs_channel_cb,
0387              conn_from_host ? new : sk);
0388     if (ret != 0) {
0389         if (conn_from_host) {
0390             hvs_new->chan = NULL;
0391             sock_put(new);
0392         } else {
0393             hvs->chan = NULL;
0394         }
0395         goto out;
0396     }
0397 
0398     set_per_channel_state(chan, conn_from_host ? new : sk);
0399 
0400     /* This reference will be dropped by hvs_close_connection(). */
0401     sock_hold(conn_from_host ? new : sk);
0402     vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
0403 
0404     /* Set the pending send size to max packet size to always get
0405      * notifications from the host when there is enough writable space.
0406      * The host is optimized to send notifications only when the pending
0407      * size boundary is crossed, and not always.
0408      */
0409     hvs_set_channel_pending_send_size(chan);
0410 
0411     if (conn_from_host) {
0412         new->sk_state = TCP_ESTABLISHED;
0413         sk_acceptq_added(sk);
0414 
0415         hvs_new->vm_srv_id = *if_type;
0416         hvs_new->host_srv_id = *if_instance;
0417 
0418         vsock_insert_connected(vnew);
0419 
0420         vsock_enqueue_accept(sk, new);
0421     } else {
0422         sk->sk_state = TCP_ESTABLISHED;
0423         sk->sk_socket->state = SS_CONNECTED;
0424 
0425         vsock_insert_connected(vsock_sk(sk));
0426     }
0427 
0428     sk->sk_state_change(sk);
0429 
0430 out:
0431     /* Release refcnt obtained when we called vsock_find_bound_socket() */
0432     sock_put(sk);
0433 
0434     release_sock(sk);
0435 }
0436 
0437 static u32 hvs_get_local_cid(void)
0438 {
0439     return VMADDR_CID_ANY;
0440 }
0441 
0442 static int hvs_sock_init(struct vsock_sock *vsk, struct vsock_sock *psk)
0443 {
0444     struct hvsock *hvs;
0445     struct sock *sk = sk_vsock(vsk);
0446 
0447     hvs = kzalloc(sizeof(*hvs), GFP_KERNEL);
0448     if (!hvs)
0449         return -ENOMEM;
0450 
0451     vsk->trans = hvs;
0452     hvs->vsk = vsk;
0453     sk->sk_sndbuf = RINGBUFFER_HVS_SND_SIZE;
0454     sk->sk_rcvbuf = RINGBUFFER_HVS_RCV_SIZE;
0455     return 0;
0456 }
0457 
0458 static int hvs_connect(struct vsock_sock *vsk)
0459 {
0460     union hvs_service_id vm, host;
0461     struct hvsock *h = vsk->trans;
0462 
0463     vm.srv_id = srv_id_template;
0464     vm.svm_port = vsk->local_addr.svm_port;
0465     h->vm_srv_id = vm.srv_id;
0466 
0467     host.srv_id = srv_id_template;
0468     host.svm_port = vsk->remote_addr.svm_port;
0469     h->host_srv_id = host.srv_id;
0470 
0471     return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
0472 }
0473 
0474 static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
0475 {
0476     struct vmpipe_proto_header hdr;
0477 
0478     if (hvs->fin_sent || !hvs->chan)
0479         return;
0480 
0481     /* It can't fail: see hvs_channel_writable_bytes(). */
0482     (void)__hvs_send_data(hvs->chan, &hdr, 0);
0483     hvs->fin_sent = true;
0484 }
0485 
0486 static int hvs_shutdown(struct vsock_sock *vsk, int mode)
0487 {
0488     if (!(mode & SEND_SHUTDOWN))
0489         return 0;
0490 
0491     hvs_shutdown_lock_held(vsk->trans, mode);
0492     return 0;
0493 }
0494 
0495 static void hvs_close_timeout(struct work_struct *work)
0496 {
0497     struct vsock_sock *vsk =
0498         container_of(work, struct vsock_sock, close_work.work);
0499     struct sock *sk = sk_vsock(vsk);
0500 
0501     sock_hold(sk);
0502     lock_sock(sk);
0503     if (!sock_flag(sk, SOCK_DONE))
0504         hvs_do_close_lock_held(vsk, false);
0505 
0506     vsk->close_work_scheduled = false;
0507     release_sock(sk);
0508     sock_put(sk);
0509 }
0510 
0511 /* Returns true if it is safe to remove the socket; false otherwise */
0512 static bool hvs_close_lock_held(struct vsock_sock *vsk)
0513 {
0514     struct sock *sk = sk_vsock(vsk);
0515 
0516     if (!(sk->sk_state == TCP_ESTABLISHED ||
0517           sk->sk_state == TCP_CLOSING))
0518         return true;
0519 
0520     if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
0521         hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);
0522 
0523     if (sock_flag(sk, SOCK_DONE))
0524         return true;
0525 
0526     /* This reference will be dropped by the delayed close routine */
0527     sock_hold(sk);
0528     INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
0529     vsk->close_work_scheduled = true;
0530     schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
0531     return false;
0532 }
0533 
0534 static void hvs_release(struct vsock_sock *vsk)
0535 {
0536     bool remove_sock;
0537 
0538     remove_sock = hvs_close_lock_held(vsk);
0539     if (remove_sock)
0540         vsock_remove_sock(vsk);
0541 }
0542 
0543 static void hvs_destruct(struct vsock_sock *vsk)
0544 {
0545     struct hvsock *hvs = vsk->trans;
0546     struct vmbus_channel *chan = hvs->chan;
0547 
0548     if (chan)
0549         vmbus_hvsock_device_unregister(chan);
0550 
0551     kfree(hvs);
0552 }
0553 
0554 static int hvs_dgram_bind(struct vsock_sock *vsk, struct sockaddr_vm *addr)
0555 {
0556     return -EOPNOTSUPP;
0557 }
0558 
0559 static int hvs_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
0560                  size_t len, int flags)
0561 {
0562     return -EOPNOTSUPP;
0563 }
0564 
0565 static int hvs_dgram_enqueue(struct vsock_sock *vsk,
0566                  struct sockaddr_vm *remote, struct msghdr *msg,
0567                  size_t dgram_len)
0568 {
0569     return -EOPNOTSUPP;
0570 }
0571 
0572 static bool hvs_dgram_allow(u32 cid, u32 port)
0573 {
0574     return false;
0575 }
0576 
0577 static int hvs_update_recv_data(struct hvsock *hvs)
0578 {
0579     struct hvs_recv_buf *recv_buf;
0580     u32 pkt_len, payload_len;
0581 
0582     pkt_len = hv_pkt_len(hvs->recv_desc);
0583 
0584     if (pkt_len < HVS_HEADER_LEN)
0585         return -EIO;
0586 
0587     recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
0588     payload_len = recv_buf->hdr.data_size;
0589 
0590     if (payload_len > pkt_len - HVS_HEADER_LEN ||
0591         payload_len > HVS_MTU_SIZE)
0592         return -EIO;
0593 
0594     if (payload_len == 0)
0595         hvs->vsk->peer_shutdown |= SEND_SHUTDOWN;
0596 
0597     hvs->recv_data_len = payload_len;
0598     hvs->recv_data_off = 0;
0599 
0600     return 0;
0601 }
0602 
0603 static ssize_t hvs_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
0604                   size_t len, int flags)
0605 {
0606     struct hvsock *hvs = vsk->trans;
0607     bool need_refill = !hvs->recv_desc;
0608     struct hvs_recv_buf *recv_buf;
0609     u32 to_read;
0610     int ret;
0611 
0612     if (flags & MSG_PEEK)
0613         return -EOPNOTSUPP;
0614 
0615     if (need_refill) {
0616         hvs->recv_desc = hv_pkt_iter_first(hvs->chan);
0617         if (!hvs->recv_desc)
0618             return -ENOBUFS;
0619         ret = hvs_update_recv_data(hvs);
0620         if (ret)
0621             return ret;
0622     }
0623 
0624     recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
0625     to_read = min_t(u32, len, hvs->recv_data_len);
0626     ret = memcpy_to_msg(msg, recv_buf->data + hvs->recv_data_off, to_read);
0627     if (ret != 0)
0628         return ret;
0629 
0630     hvs->recv_data_len -= to_read;
0631     if (hvs->recv_data_len == 0) {
0632         hvs->recv_desc = hv_pkt_iter_next(hvs->chan, hvs->recv_desc);
0633         if (hvs->recv_desc) {
0634             ret = hvs_update_recv_data(hvs);
0635             if (ret)
0636                 return ret;
0637         }
0638     } else {
0639         hvs->recv_data_off += to_read;
0640     }
0641 
0642     return to_read;
0643 }
0644 
0645 static ssize_t hvs_stream_enqueue(struct vsock_sock *vsk, struct msghdr *msg,
0646                   size_t len)
0647 {
0648     struct hvsock *hvs = vsk->trans;
0649     struct vmbus_channel *chan = hvs->chan;
0650     struct hvs_send_buf *send_buf;
0651     ssize_t to_write, max_writable;
0652     ssize_t ret = 0;
0653     ssize_t bytes_written = 0;
0654 
0655     BUILD_BUG_ON(sizeof(*send_buf) != HV_HYP_PAGE_SIZE);
0656 
0657     send_buf = kmalloc(sizeof(*send_buf), GFP_KERNEL);
0658     if (!send_buf)
0659         return -ENOMEM;
0660 
0661     /* Reader(s) could be draining data from the channel as we write.
0662      * To maximize bandwidth, iterate until the channel is found to be
0663      * full.
0664      */
0665     while (len) {
0666         max_writable = hvs_channel_writable_bytes(chan);
0667         if (!max_writable)
0668             break;
0669         to_write = min_t(ssize_t, len, max_writable);
0670         to_write = min_t(ssize_t, to_write, HVS_SEND_BUF_SIZE);
0671         /* memcpy_from_msg is safe to call in a loop, as it advances the
0672          * offsets within the message iterator.
0673          */
0674         ret = memcpy_from_msg(send_buf->data, msg, to_write);
0675         if (ret < 0)
0676             goto out;
0677 
0678         ret = hvs_send_data(hvs->chan, send_buf, to_write);
0679         if (ret < 0)
0680             goto out;
0681 
0682         bytes_written += to_write;
0683         len -= to_write;
0684     }
0685 out:
0686     /* If any data has been sent, return that */
0687     if (bytes_written)
0688         ret = bytes_written;
0689     kfree(send_buf);
0690     return ret;
0691 }
0692 
0693 static s64 hvs_stream_has_data(struct vsock_sock *vsk)
0694 {
0695     struct hvsock *hvs = vsk->trans;
0696     s64 ret;
0697 
0698     if (hvs->recv_data_len > 0)
0699         return 1;
0700 
0701     switch (hvs_channel_readable_payload(hvs->chan)) {
0702     case 1:
0703         ret = 1;
0704         break;
0705     case 0:
0706         vsk->peer_shutdown |= SEND_SHUTDOWN;
0707         ret = 0;
0708         break;
0709     default: /* -1 */
0710         ret = 0;
0711         break;
0712     }
0713 
0714     return ret;
0715 }
0716 
0717 static s64 hvs_stream_has_space(struct vsock_sock *vsk)
0718 {
0719     struct hvsock *hvs = vsk->trans;
0720 
0721     return hvs_channel_writable_bytes(hvs->chan);
0722 }
0723 
0724 static u64 hvs_stream_rcvhiwat(struct vsock_sock *vsk)
0725 {
0726     return HVS_MTU_SIZE + 1;
0727 }
0728 
0729 static bool hvs_stream_is_active(struct vsock_sock *vsk)
0730 {
0731     struct hvsock *hvs = vsk->trans;
0732 
0733     return hvs->chan != NULL;
0734 }
0735 
0736 static bool hvs_stream_allow(u32 cid, u32 port)
0737 {
0738     if (cid == VMADDR_CID_HOST)
0739         return true;
0740 
0741     return false;
0742 }
0743 
0744 static
0745 int hvs_notify_poll_in(struct vsock_sock *vsk, size_t target, bool *readable)
0746 {
0747     struct hvsock *hvs = vsk->trans;
0748 
0749     *readable = hvs_channel_readable(hvs->chan);
0750     return 0;
0751 }
0752 
0753 static
0754 int hvs_notify_poll_out(struct vsock_sock *vsk, size_t target, bool *writable)
0755 {
0756     *writable = hvs_stream_has_space(vsk) > 0;
0757 
0758     return 0;
0759 }
0760 
0761 static
0762 int hvs_notify_recv_init(struct vsock_sock *vsk, size_t target,
0763              struct vsock_transport_recv_notify_data *d)
0764 {
0765     return 0;
0766 }
0767 
0768 static
0769 int hvs_notify_recv_pre_block(struct vsock_sock *vsk, size_t target,
0770                   struct vsock_transport_recv_notify_data *d)
0771 {
0772     return 0;
0773 }
0774 
0775 static
0776 int hvs_notify_recv_pre_dequeue(struct vsock_sock *vsk, size_t target,
0777                 struct vsock_transport_recv_notify_data *d)
0778 {
0779     return 0;
0780 }
0781 
0782 static
0783 int hvs_notify_recv_post_dequeue(struct vsock_sock *vsk, size_t target,
0784                  ssize_t copied, bool data_read,
0785                  struct vsock_transport_recv_notify_data *d)
0786 {
0787     return 0;
0788 }
0789 
0790 static
0791 int hvs_notify_send_init(struct vsock_sock *vsk,
0792              struct vsock_transport_send_notify_data *d)
0793 {
0794     return 0;
0795 }
0796 
0797 static
0798 int hvs_notify_send_pre_block(struct vsock_sock *vsk,
0799                   struct vsock_transport_send_notify_data *d)
0800 {
0801     return 0;
0802 }
0803 
0804 static
0805 int hvs_notify_send_pre_enqueue(struct vsock_sock *vsk,
0806                 struct vsock_transport_send_notify_data *d)
0807 {
0808     return 0;
0809 }
0810 
0811 static
0812 int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written,
0813                  struct vsock_transport_send_notify_data *d)
0814 {
0815     return 0;
0816 }
0817 
0818 static struct vsock_transport hvs_transport = {
0819     .module                   = THIS_MODULE,
0820 
0821     .get_local_cid            = hvs_get_local_cid,
0822 
0823     .init                     = hvs_sock_init,
0824     .destruct                 = hvs_destruct,
0825     .release                  = hvs_release,
0826     .connect                  = hvs_connect,
0827     .shutdown                 = hvs_shutdown,
0828 
0829     .dgram_bind               = hvs_dgram_bind,
0830     .dgram_dequeue            = hvs_dgram_dequeue,
0831     .dgram_enqueue            = hvs_dgram_enqueue,
0832     .dgram_allow              = hvs_dgram_allow,
0833 
0834     .stream_dequeue           = hvs_stream_dequeue,
0835     .stream_enqueue           = hvs_stream_enqueue,
0836     .stream_has_data          = hvs_stream_has_data,
0837     .stream_has_space         = hvs_stream_has_space,
0838     .stream_rcvhiwat          = hvs_stream_rcvhiwat,
0839     .stream_is_active         = hvs_stream_is_active,
0840     .stream_allow             = hvs_stream_allow,
0841 
0842     .notify_poll_in           = hvs_notify_poll_in,
0843     .notify_poll_out          = hvs_notify_poll_out,
0844     .notify_recv_init         = hvs_notify_recv_init,
0845     .notify_recv_pre_block    = hvs_notify_recv_pre_block,
0846     .notify_recv_pre_dequeue  = hvs_notify_recv_pre_dequeue,
0847     .notify_recv_post_dequeue = hvs_notify_recv_post_dequeue,
0848     .notify_send_init         = hvs_notify_send_init,
0849     .notify_send_pre_block    = hvs_notify_send_pre_block,
0850     .notify_send_pre_enqueue  = hvs_notify_send_pre_enqueue,
0851     .notify_send_post_enqueue = hvs_notify_send_post_enqueue,
0852 
0853 };
0854 
0855 static bool hvs_check_transport(struct vsock_sock *vsk)
0856 {
0857     return vsk->transport == &hvs_transport;
0858 }
0859 
0860 static int hvs_probe(struct hv_device *hdev,
0861              const struct hv_vmbus_device_id *dev_id)
0862 {
0863     struct vmbus_channel *chan = hdev->channel;
0864 
0865     hvs_open_connection(chan);
0866 
0867     /* Always return success to suppress the unnecessary error message
0868      * in vmbus_probe(): on error the host will rescind the device in
0869      * 30 seconds and we can do cleanup at that time in
0870      * vmbus_onoffer_rescind().
0871      */
0872     return 0;
0873 }
0874 
0875 static int hvs_remove(struct hv_device *hdev)
0876 {
0877     struct vmbus_channel *chan = hdev->channel;
0878 
0879     vmbus_close(chan);
0880 
0881     return 0;
0882 }
0883 
0884 /* hv_sock connections can not persist across hibernation, and all the hv_sock
0885  * channels are forced to be rescinded before hibernation: see
0886  * vmbus_bus_suspend(). Here the dummy hvs_suspend() and hvs_resume()
0887  * are only needed because hibernation requires that every vmbus device's
0888  * driver should have a .suspend and .resume callback: see vmbus_suspend().
0889  */
0890 static int hvs_suspend(struct hv_device *hv_dev)
0891 {
0892     /* Dummy */
0893     return 0;
0894 }
0895 
0896 static int hvs_resume(struct hv_device *dev)
0897 {
0898     /* Dummy */
0899     return 0;
0900 }
0901 
0902 /* This isn't really used. See vmbus_match() and vmbus_probe() */
0903 static const struct hv_vmbus_device_id id_table[] = {
0904     {},
0905 };
0906 
0907 static struct hv_driver hvs_drv = {
0908     .name       = "hv_sock",
0909     .hvsock     = true,
0910     .id_table   = id_table,
0911     .probe      = hvs_probe,
0912     .remove     = hvs_remove,
0913     .suspend    = hvs_suspend,
0914     .resume     = hvs_resume,
0915 };
0916 
0917 static int __init hvs_init(void)
0918 {
0919     int ret;
0920 
0921     if (vmbus_proto_version < VERSION_WIN10)
0922         return -ENODEV;
0923 
0924     ret = vmbus_driver_register(&hvs_drv);
0925     if (ret != 0)
0926         return ret;
0927 
0928     ret = vsock_core_register(&hvs_transport, VSOCK_TRANSPORT_F_G2H);
0929     if (ret) {
0930         vmbus_driver_unregister(&hvs_drv);
0931         return ret;
0932     }
0933 
0934     return 0;
0935 }
0936 
0937 static void __exit hvs_exit(void)
0938 {
0939     vsock_core_unregister(&hvs_transport);
0940     vmbus_driver_unregister(&hvs_drv);
0941 }
0942 
0943 module_init(hvs_init);
0944 module_exit(hvs_exit);
0945 
0946 MODULE_DESCRIPTION("Hyper-V Sockets");
0947 MODULE_VERSION("1.0.0");
0948 MODULE_LICENSE("GPL");
0949 MODULE_ALIAS_NETPROTO(PF_VSOCK);