0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *
0004  * Copyright (c) 2011, Microsoft Corporation.
0005  *
0006  * Authors:
0007  *   Haiyang Zhang <haiyangz@microsoft.com>
0008  *   Hank Janssen  <hjanssen@microsoft.com>
0009  *   K. Y. Srinivasan <kys@microsoft.com>
0010  */
0011 
0012 #ifndef _HYPERV_H
0013 #define _HYPERV_H
0014 
0015 #include <uapi/linux/hyperv.h>
0016 
0017 #include <linux/mm.h>
0018 #include <linux/types.h>
0019 #include <linux/scatterlist.h>
0020 #include <linux/list.h>
0021 #include <linux/timer.h>
0022 #include <linux/completion.h>
0023 #include <linux/device.h>
0024 #include <linux/mod_devicetable.h>
0025 #include <linux/interrupt.h>
0026 #include <linux/reciprocal_div.h>
0027 #include <asm/hyperv-tlfs.h>
0028 
0029 #define MAX_PAGE_BUFFER_COUNT               32
0030 #define MAX_MULTIPAGE_BUFFER_COUNT          32 /* 128K */
0031 
0032 #pragma pack(push, 1)
0033 
0034 /*
0035  * Types for GPADL: decides how the GPADL header is created.
0036  *
0037  * There is little difference between BUFFER and RING if PAGE_SIZE is the
0038  * same as HV_HYP_PAGE_SIZE.
0039  *
0040  * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
0041  * will be PAGE_SIZE long; however, only the first HV_HYP_PAGE will be put
0042  * into the gpadl. Therefore the number of HV_HYP_PAGEs and the index of each
0043  * HV_HYP_PAGE will differ between the GPADL types. For example, if PAGE_SIZE
0044  * is 64K:
0045  *
0046  * BUFFER:
0047  *
0048  * gva:    |--       64k      --|--       64k      --| ... |
0049  * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
0050  * index:  0    1    2     15   16   17   18 .. 31   32 ...
0051  *         |    |    ...   |    |    |   ...    |   ...
0052  *         v    V          V    V    V          V
0053  * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
0054  * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
0055  *
0056  * RING:
0057  *
0058  *         | header  |           data           | header  |     data      |
0059  * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
0060  * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
0061  * index:  0    1    16   17   18    31   ...   n   n+1  n+16 ...         2n
0062  *         |         /    /          /          |         /               /
0063  *         |        /    /          /           |        /               /
0064  *         |       /    /   ...    /    ...     |       /      ...      /
0065  *         |      /    /          /             |      /               /
0066  *         |     /    /          /              |     /               /
0067  *         V    V    V          V               V    V               v
0068  * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k |  ...     |
0069  * index:  0    1    2   ...    16   ...       n-15 n-14 n-13  ...  2n-30
0070  */
0071 enum hv_gpadl_type {
0072     HV_GPADL_BUFFER,
0073     HV_GPADL_RING
0074 };
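
/*
 * Illustrative sketch (not part of the original header): for an
 * HV_GPADL_BUFFER region every HV_HYP_PAGE is entered into the GPADL, so
 * with PAGE_SIZE = 64K each guest page contributes 16 entries, as in the
 * BUFFER diagram above. The RING case, where only the first HV_HYP_PAGE
 * of each header page is mapped, is handled by the GPADL setup code in
 * drivers/hv/channel.c. A hypothetical helper for the BUFFER case:
 */
static inline u32 example_buffer_gpadl_pfncount(u32 size)
{
	/* One PFN per Hyper-V (HV_HYP_PAGE_SIZE) page covered by the buffer. */
	return size >> HV_HYP_PAGE_SHIFT;
}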
0075 
0076 /* Single-page buffer */
0077 struct hv_page_buffer {
0078     u32 len;
0079     u32 offset;
0080     u64 pfn;
0081 };
0082 
0083 /* Multiple-page buffer */
0084 struct hv_multipage_buffer {
0085     /* Length and Offset determine the # of pfns in the array */
0086     u32 len;
0087     u32 offset;
0088     u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
0089 };
0090 
0091 /*
0092  * Multiple-page buffer array; the pfn array is variable size:
0093  * The number of entries in the PFN array is determined by
0094  * "len" and "offset".
0095  */
0096 struct hv_mpb_array {
0097     /* Length and Offset determine the # of pfns in the array */
0098     u32 len;
0099     u32 offset;
0100     u64 pfn_array[];
0101 };
0102 
0103 /* 0x18 includes the proprietary packet header */
0104 #define MAX_PAGE_BUFFER_PACKET      (0x18 +         \
0105                     (sizeof(struct hv_page_buffer) * \
0106                      MAX_PAGE_BUFFER_COUNT))
0107 #define MAX_MULTIPAGE_BUFFER_PACKET (0x18 +         \
0108                      sizeof(struct hv_multipage_buffer))
0109 
0110 
0111 #pragma pack(pop)
0112 
0113 struct hv_ring_buffer {
0114     /* Offset in bytes from the start of ring data below */
0115     u32 write_index;
0116 
0117     /* Offset in bytes from the start of ring data below */
0118     u32 read_index;
0119 
0120     u32 interrupt_mask;
0121 
0122     /*
0123      * WS2012/Win8 and later versions of Hyper-V implement interrupt
0124      * driven flow management. The feature bit feat_pending_send_sz
0125      * is set by the host on the host->guest ring buffer, and by the
0126      * guest on the guest->host ring buffer.
0127      *
0128      * The meaning of the feature bit is a bit complex in that it has
0129      * semantics that apply to both ring buffers.  If the guest sets
0130      * the feature bit in the guest->host ring buffer, the guest is
0131      * telling the host that:
0132      * 1) It will set the pending_send_sz field in the guest->host ring
0133      *    buffer when it is waiting for space to become available, and
0134      * 2) It will read the pending_send_sz field in the host->guest
0135      *    ring buffer and interrupt the host when it frees enough space
0136      *
0137      * Similarly, if the host sets the feature bit in the host->guest
0138      * ring buffer, the host is telling the guest that:
0139      * 1) It will set the pending_send_sz field in the host->guest ring
0140      *    buffer when it is waiting for space to become available, and
0141      * 2) It will read the pending_send_sz field in the guest->host
0142      *    ring buffer and interrupt the guest when it frees enough space
0143      *
0144      * If either the guest or host does not set the feature bit that it
0145      * owns, that guest or host must do polling if it encounters a full
0146      * ring buffer, and not signal the other end with an interrupt.
0147      */
0148     u32 pending_send_sz;
0149     u32 reserved1[12];
0150     union {
0151         struct {
0152             u32 feat_pending_send_sz:1;
0153         };
0154         u32 value;
0155     } feature_bits;
0156 
0157     /* Pad it to PAGE_SIZE so that data starts on page boundary */
0158     u8  reserved2[PAGE_SIZE - 68];
0159 
0160     /*
0161      * Ring data starts here + RingDataStartOffset
0162      * !!! DO NOT place any fields below this !!!
0163      */
0164     u8 buffer[];
0165 } __packed;
0166 
0167 /* Calculate the proper size of a ring buffer; it must be page-aligned */
0168 #define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
0169                            (payload_sz))
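
/*
 * Illustrative usage (not part of the original header): a driver that
 * wants 16 pages of payload in each ring would pass this value as the
 * send/recv ring size to vmbus_open() or vmbus_alloc_ring(); the macro
 * adds room for the struct hv_ring_buffer header and rounds the result
 * up to a page multiple.
 */
static inline u32 example_vmbus_ring_bytes(void)
{
	return VMBUS_RING_SIZE(16 * PAGE_SIZE);
}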
0170 
0171 struct hv_ring_buffer_info {
0172     struct hv_ring_buffer *ring_buffer;
0173     u32 ring_size;          /* Include the shared header */
0174     struct reciprocal_value ring_size_div10_reciprocal;
0175     spinlock_t ring_lock;
0176 
0177     u32 ring_datasize;      /* < ring_size */
0178     u32 priv_read_index;
0179     /*
0180      * The ring buffer mutex lock. This lock prevents the ring buffer from
0181      * being freed while the ring buffer is being accessed.
0182      */
0183     struct mutex ring_buffer_mutex;
0184 
0185     /* Buffer that holds a copy of an incoming host packet */
0186     void *pkt_buffer;
0187     u32 pkt_buffer_size;
0188 };
0189 
0190 
0191 static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
0192 {
0193     u32 read_loc, write_loc, dsize, read;
0194 
0195     dsize = rbi->ring_datasize;
0196     read_loc = rbi->ring_buffer->read_index;
0197     write_loc = READ_ONCE(rbi->ring_buffer->write_index);
0198 
0199     read = write_loc >= read_loc ? (write_loc - read_loc) :
0200         (dsize - read_loc) + write_loc;
0201 
0202     return read;
0203 }
0204 
0205 static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
0206 {
0207     u32 read_loc, write_loc, dsize, write;
0208 
0209     dsize = rbi->ring_datasize;
0210     read_loc = READ_ONCE(rbi->ring_buffer->read_index);
0211     write_loc = rbi->ring_buffer->write_index;
0212 
0213     write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
0214         read_loc - write_loc;
0215     return write;
0216 }
0217 
0218 static inline u32 hv_get_avail_to_write_percent(
0219         const struct hv_ring_buffer_info *rbi)
0220 {
0221     u32 avail_write = hv_get_bytes_to_write(rbi);
0222 
0223     return reciprocal_divide(
0224             (avail_write  << 3) + (avail_write << 1),
0225             rbi->ring_size_div10_reciprocal);
0226 }
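
/*
 * Worked example (not part of the original header): the shift-and-add
 * above computes avail_write * 10, and ring_size_div10_reciprocal is the
 * precomputed reciprocal of ring_size / 10, so the call is equivalent to
 *
 *	percent = (avail_write * 10) / (ring_size / 10)
 *		~= (avail_write * 100) / ring_size
 *
 * i.e. the percentage of the outbound ring still writable, computed
 * without a runtime division.
 */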
0227 
0228 /*
0229  * The VMBUS version is a 32 bit entity broken up into
0230  * two 16 bit quantities: major_number.minor_number.
0231  *
0232  * 0 . 13 (Windows Server 2008)
0233  * 1 . 1  (Windows 7, WS2008 R2)
0234  * 2 . 4  (Windows 8, WS2012)
0235  * 3 . 0  (Windows 8.1, WS2012 R2)
0236  * 4 . 0  (Windows 10)
0237  * 4 . 1  (Windows 10 RS3)
0238  * 5 . 0  (Newer Windows 10)
0239  * 5 . 1  (Windows 10 RS4)
0240  * 5 . 2  (Windows Server 2019, RS5)
0241  * 5 . 3  (Windows Server 2022)
0242  *
0243  * The WS2008 and WIN7 versions are listed here for
0244  * completeness but are no longer supported in the
0245  * Linux kernel.
0246  */
0247 
0248 #define VERSION_WS2008  ((0 << 16) | (13))
0249 #define VERSION_WIN7    ((1 << 16) | (1))
0250 #define VERSION_WIN8    ((2 << 16) | (4))
0251 #define VERSION_WIN8_1    ((3 << 16) | (0))
0252 #define VERSION_WIN10 ((4 << 16) | (0))
0253 #define VERSION_WIN10_V4_1 ((4 << 16) | (1))
0254 #define VERSION_WIN10_V5 ((5 << 16) | (0))
0255 #define VERSION_WIN10_V5_1 ((5 << 16) | (1))
0256 #define VERSION_WIN10_V5_2 ((5 << 16) | (2))
0257 #define VERSION_WIN10_V5_3 ((5 << 16) | (3))
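
/*
 * Illustrative helpers (not part of the original header, names are
 * hypothetical): the major number lives in the upper 16 bits and the
 * minor number in the lower 16 bits, e.g. VERSION_WIN10_V5_2 decomposes
 * to major 5, minor 2.
 */
static inline u16 example_vmbus_major(u32 version)
{
	return version >> 16;
}

static inline u16 example_vmbus_minor(u32 version)
{
	return version & 0xffff;
}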
0258 
0259 /* The maximum size of a pipe payload is 16K */
0260 #define MAX_PIPE_DATA_PAYLOAD       (sizeof(u8) * 16384)
0261 
0262 /* Define PipeMode values. */
0263 #define VMBUS_PIPE_TYPE_BYTE        0x00000000
0264 #define VMBUS_PIPE_TYPE_MESSAGE     0x00000004
0265 
0266 /* The size of the user defined data buffer for non-pipe offers. */
0267 #define MAX_USER_DEFINED_BYTES      120
0268 
0269 /* The size of the user defined data buffer for pipe offers. */
0270 #define MAX_PIPE_USER_DEFINED_BYTES 116
0271 
0272 /*
0273  * At the center of the Channel Management library is the Channel Offer. This
0274  * struct contains the fundamental information about an offer.
0275  */
0276 struct vmbus_channel_offer {
0277     guid_t if_type;
0278     guid_t if_instance;
0279 
0280     /*
0281      * These two fields are not currently used.
0282      */
0283     u64 reserved1;
0284     u64 reserved2;
0285 
0286     u16 chn_flags;
0287     u16 mmio_megabytes;     /* in bytes * 1024 * 1024 */
0288 
0289     union {
0290         /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
0291         struct {
0292             unsigned char user_def[MAX_USER_DEFINED_BYTES];
0293         } std;
0294 
0295         /*
0296          * Pipes:
0297          * The following structure is an integrated pipe protocol, which
0298          * is implemented on top of standard user-defined data. Pipe
0299          * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
0300          * use.
0301          */
0302         struct {
0303             u32  pipe_mode;
0304             unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
0305         } pipe;
0306     } u;
0307     /*
0308      * The sub_channel_index is defined in Win8: a value of zero means a
0309      * primary channel and a value of non-zero means a sub-channel.
0310      *
0311      * Before Win8, the field is reserved, meaning it's always zero.
0312      */
0313     u16 sub_channel_index;
0314     u16 reserved3;
0315 } __packed;
0316 
0317 /* Server Flags */
0318 #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE    1
0319 #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES    2
0320 #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS        4
0321 #define VMBUS_CHANNEL_NAMED_PIPE_MODE           0x10
0322 #define VMBUS_CHANNEL_LOOPBACK_OFFER            0x100
0323 #define VMBUS_CHANNEL_PARENT_OFFER          0x200
0324 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION    0x400
0325 #define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER      0x2000
0326 
0327 struct vmpacket_descriptor {
0328     u16 type;
0329     u16 offset8;
0330     u16 len8;
0331     u16 flags;
0332     u64 trans_id;
0333 } __packed;
0334 
0335 struct vmpacket_header {
0336     u32 prev_pkt_start_offset;
0337     struct vmpacket_descriptor descriptor;
0338 } __packed;
0339 
0340 struct vmtransfer_page_range {
0341     u32 byte_count;
0342     u32 byte_offset;
0343 } __packed;
0344 
0345 struct vmtransfer_page_packet_header {
0346     struct vmpacket_descriptor d;
0347     u16 xfer_pageset_id;
0348     u8  sender_owns_set;
0349     u8 reserved;
0350     u32 range_cnt;
0351     struct vmtransfer_page_range ranges[1];
0352 } __packed;
0353 
0354 struct vmgpadl_packet_header {
0355     struct vmpacket_descriptor d;
0356     u32 gpadl;
0357     u32 reserved;
0358 } __packed;
0359 
0360 struct vmadd_remove_transfer_page_set {
0361     struct vmpacket_descriptor d;
0362     u32 gpadl;
0363     u16 xfer_pageset_id;
0364     u16 reserved;
0365 } __packed;
0366 
0367 /*
0368  * This structure defines a range in guest physical space that can be made to
0369  * look virtually contiguous.
0370  */
0371 struct gpa_range {
0372     u32 byte_count;
0373     u32 byte_offset;
0374     u64 pfn_array[];
0375 };
0376 
0377 /*
0378  * This is the format for an Establish Gpadl packet, which contains a handle by
0379  * which this GPADL will be known and a set of GPA ranges associated with it.
0380  * This can be converted to a MDL by the guest OS.  If there are multiple GPA
0381  * ranges, then the resulting MDL will be "chained," representing multiple VA
0382  * ranges.
0383  */
0384 struct vmestablish_gpadl {
0385     struct vmpacket_descriptor d;
0386     u32 gpadl;
0387     u32 range_cnt;
0388     struct gpa_range range[1];
0389 } __packed;
0390 
0391 /*
0392  * This is the format for a Teardown Gpadl packet, which indicates that the
0393  * GPADL handle in the Establish Gpadl packet will never be referenced again.
0394  */
0395 struct vmteardown_gpadl {
0396     struct vmpacket_descriptor d;
0397     u32 gpadl;
0398     u32 reserved;   /* for alignment to an 8-byte boundary */
0399 } __packed;
0400 
0401 /*
0402  * This is the format for a GPA-Direct packet, which contains a set of GPA
0403  * ranges, in addition to commands and/or data.
0404  */
0405 struct vmdata_gpa_direct {
0406     struct vmpacket_descriptor d;
0407     u32 reserved;
0408     u32 range_cnt;
0409     struct gpa_range range[1];
0410 } __packed;
0411 
0412 /* This is the format for an Additional Data Packet. */
0413 struct vmadditional_data {
0414     struct vmpacket_descriptor d;
0415     u64 total_bytes;
0416     u32 offset;
0417     u32 byte_cnt;
0418     unsigned char data[1];
0419 } __packed;
0420 
0421 union vmpacket_largest_possible_header {
0422     struct vmpacket_descriptor simple_hdr;
0423     struct vmtransfer_page_packet_header xfer_page_hdr;
0424     struct vmgpadl_packet_header gpadl_hdr;
0425     struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
0426     struct vmestablish_gpadl establish_gpadl_hdr;
0427     struct vmteardown_gpadl teardown_gpadl_hdr;
0428     struct vmdata_gpa_direct data_gpa_direct_hdr;
0429 };
0430 
0431 #define VMPACKET_DATA_START_ADDRESS(__packet)   \
0432     (void *)(((unsigned char *)__packet) +  \
0433      ((struct vmpacket_descriptor *)__packet)->offset8 * 8)
0434 
0435 #define VMPACKET_DATA_LENGTH(__packet)      \
0436     ((((struct vmpacket_descriptor *)__packet)->len8 -    \
0437       ((struct vmpacket_descriptor *)__packet)->offset8) * 8)
0438 
0439 #define VMPACKET_TRANSFER_MODE(__packet)    \
0440     (((struct vmpacket_descriptor *)__packet)->type)
0441 
0442 enum vmbus_packet_type {
0443     VM_PKT_INVALID              = 0x0,
0444     VM_PKT_SYNCH                = 0x1,
0445     VM_PKT_ADD_XFER_PAGESET         = 0x2,
0446     VM_PKT_RM_XFER_PAGESET          = 0x3,
0447     VM_PKT_ESTABLISH_GPADL          = 0x4,
0448     VM_PKT_TEARDOWN_GPADL           = 0x5,
0449     VM_PKT_DATA_INBAND          = 0x6,
0450     VM_PKT_DATA_USING_XFER_PAGES        = 0x7,
0451     VM_PKT_DATA_USING_GPADL         = 0x8,
0452     VM_PKT_DATA_USING_GPA_DIRECT        = 0x9,
0453     VM_PKT_CANCEL_REQUEST           = 0xa,
0454     VM_PKT_COMP             = 0xb,
0455     VM_PKT_DATA_USING_ADDITIONAL_PKT    = 0xc,
0456     VM_PKT_ADDITIONAL_DATA          = 0xd
0457 };
0458 
0459 #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
0460 
0461 
0462 /* Version 1 messages */
0463 enum vmbus_channel_message_type {
0464     CHANNELMSG_INVALID          =  0,
0465     CHANNELMSG_OFFERCHANNEL     =  1,
0466     CHANNELMSG_RESCIND_CHANNELOFFER =  2,
0467     CHANNELMSG_REQUESTOFFERS        =  3,
0468     CHANNELMSG_ALLOFFERS_DELIVERED  =  4,
0469     CHANNELMSG_OPENCHANNEL      =  5,
0470     CHANNELMSG_OPENCHANNEL_RESULT       =  6,
0471     CHANNELMSG_CLOSECHANNEL     =  7,
0472     CHANNELMSG_GPADL_HEADER     =  8,
0473     CHANNELMSG_GPADL_BODY           =  9,
0474     CHANNELMSG_GPADL_CREATED        = 10,
0475     CHANNELMSG_GPADL_TEARDOWN       = 11,
0476     CHANNELMSG_GPADL_TORNDOWN       = 12,
0477     CHANNELMSG_RELID_RELEASED       = 13,
0478     CHANNELMSG_INITIATE_CONTACT     = 14,
0479     CHANNELMSG_VERSION_RESPONSE     = 15,
0480     CHANNELMSG_UNLOAD           = 16,
0481     CHANNELMSG_UNLOAD_RESPONSE      = 17,
0482     CHANNELMSG_18               = 18,
0483     CHANNELMSG_19               = 19,
0484     CHANNELMSG_20               = 20,
0485     CHANNELMSG_TL_CONNECT_REQUEST       = 21,
0486     CHANNELMSG_MODIFYCHANNEL        = 22,
0487     CHANNELMSG_TL_CONNECT_RESULT        = 23,
0488     CHANNELMSG_MODIFYCHANNEL_RESPONSE   = 24,
0489     CHANNELMSG_COUNT
0490 };
0491 
0492 /* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
0493 #define INVALID_RELID   U32_MAX
0494 
0495 struct vmbus_channel_message_header {
0496     enum vmbus_channel_message_type msgtype;
0497     u32 padding;
0498 } __packed;
0499 
0500 /* Query VMBus Version parameters */
0501 struct vmbus_channel_query_vmbus_version {
0502     struct vmbus_channel_message_header header;
0503     u32 version;
0504 } __packed;
0505 
0506 /* VMBus Version Supported parameters */
0507 struct vmbus_channel_version_supported {
0508     struct vmbus_channel_message_header header;
0509     u8 version_supported;
0510 } __packed;
0511 
0512 /* Offer Channel parameters */
0513 struct vmbus_channel_offer_channel {
0514     struct vmbus_channel_message_header header;
0515     struct vmbus_channel_offer offer;
0516     u32 child_relid;
0517     u8 monitorid;
0518     /*
0519      * win7 and beyond splits this field into a bit field.
0520      */
0521     u8 monitor_allocated:1;
0522     u8 reserved:7;
0523     /*
0524      * These are new fields added in win7 and later.
0525      * Do not access these fields without checking the
0526      * negotiated protocol.
0527      *
0528      * If "is_dedicated_interrupt" is set, we must not set the
0529      * associated bit in the channel bitmap while sending the
0530      * interrupt to the host.
0531      *
0532      * connection_id is to be used in signaling the host.
0533      */
0534     u16 is_dedicated_interrupt:1;
0535     u16 reserved1:15;
0536     u32 connection_id;
0537 } __packed;
0538 
0539 /* Rescind Offer parameters */
0540 struct vmbus_channel_rescind_offer {
0541     struct vmbus_channel_message_header header;
0542     u32 child_relid;
0543 } __packed;
0544 
0545 /*
0546  * Request Offer -- no parameters, SynIC message contains the partition ID
0547  * Set Snoop -- no parameters, SynIC message contains the partition ID
0548  * Clear Snoop -- no parameters, SynIC message contains the partition ID
0549  * All Offers Delivered -- no parameters, SynIC message contains the partition
0550  *                 ID
0551  * Flush Client -- no parameters, SynIC message contains the partition ID
0552  */
0553 
0554 /* Open Channel parameters */
0555 struct vmbus_channel_open_channel {
0556     struct vmbus_channel_message_header header;
0557 
0558     /* Identifies the specific VMBus channel that is being opened. */
0559     u32 child_relid;
0560 
0561     /* ID making a particular open request at a channel offer unique. */
0562     u32 openid;
0563 
0564     /* GPADL for the channel's ring buffer. */
0565     u32 ringbuffer_gpadlhandle;
0566 
0567     /*
0568      * Starting with win8, this field will be used to specify
0569      * the target virtual processor on which to deliver the interrupt for
0570      * the host to guest communication.
0571      * Prior to win8, incoming channel interrupts would only
0572      * be delivered on cpu 0. Setting this value to 0 would
0573      * preserve the earlier behavior.
0574      */
0575     u32 target_vp;
0576 
0577     /*
0578      * The upstream ring buffer begins at offset zero in the memory
0579      * described by RingBufferGpadlHandle. The downstream ring buffer
0580      * follows it at this offset (in pages).
0581      */
0582     u32 downstream_ringbuffer_pageoffset;
0583 
0584     /* User-specific data to be passed along to the server endpoint. */
0585     unsigned char userdata[MAX_USER_DEFINED_BYTES];
0586 } __packed;
0587 
0588 /* Open Channel Result parameters */
0589 struct vmbus_channel_open_result {
0590     struct vmbus_channel_message_header header;
0591     u32 child_relid;
0592     u32 openid;
0593     u32 status;
0594 } __packed;
0595 
0596 /* Modify Channel Result parameters */
0597 struct vmbus_channel_modifychannel_response {
0598     struct vmbus_channel_message_header header;
0599     u32 child_relid;
0600     u32 status;
0601 } __packed;
0602 
0603 /* Close channel parameters */
0604 struct vmbus_channel_close_channel {
0605     struct vmbus_channel_message_header header;
0606     u32 child_relid;
0607 } __packed;
0608 
0609 /* Channel Message GPADL */
0610 #define GPADL_TYPE_RING_BUFFER      1
0611 #define GPADL_TYPE_SERVER_SAVE_AREA 2
0612 #define GPADL_TYPE_TRANSACTION      8
0613 
0614 /*
0615  * The number of PFNs in a GPADL message is defined by the number of
0616  * pages that would be spanned by ByteCount and ByteOffset.  If the
0617  * implied number of PFNs won't fit in this packet, there will be a
0618  * follow-up packet that contains more.
0619  */
0620 struct vmbus_channel_gpadl_header {
0621     struct vmbus_channel_message_header header;
0622     u32 child_relid;
0623     u32 gpadl;
0624     u16 range_buflen;
0625     u16 rangecount;
0626     struct gpa_range range[];
0627 } __packed;
0628 
0629 /* This is the followup packet that contains more PFNs. */
0630 struct vmbus_channel_gpadl_body {
0631     struct vmbus_channel_message_header header;
0632     u32 msgnumber;
0633     u32 gpadl;
0634     u64 pfn[];
0635 } __packed;
0636 
0637 struct vmbus_channel_gpadl_created {
0638     struct vmbus_channel_message_header header;
0639     u32 child_relid;
0640     u32 gpadl;
0641     u32 creation_status;
0642 } __packed;
0643 
0644 struct vmbus_channel_gpadl_teardown {
0645     struct vmbus_channel_message_header header;
0646     u32 child_relid;
0647     u32 gpadl;
0648 } __packed;
0649 
0650 struct vmbus_channel_gpadl_torndown {
0651     struct vmbus_channel_message_header header;
0652     u32 gpadl;
0653 } __packed;
0654 
0655 struct vmbus_channel_relid_released {
0656     struct vmbus_channel_message_header header;
0657     u32 child_relid;
0658 } __packed;
0659 
0660 struct vmbus_channel_initiate_contact {
0661     struct vmbus_channel_message_header header;
0662     u32 vmbus_version_requested;
0663     u32 target_vcpu; /* The VCPU the host should respond to */
0664     union {
0665         u64 interrupt_page;
0666         struct {
0667             u8  msg_sint;
0668             u8  padding1[3];
0669             u32 padding2;
0670         };
0671     };
0672     u64 monitor_page1;
0673     u64 monitor_page2;
0674 } __packed;
0675 
0676 /* Hyper-V socket: guest's connect()-ing to host */
0677 struct vmbus_channel_tl_connect_request {
0678     struct vmbus_channel_message_header header;
0679     guid_t guest_endpoint_id;
0680     guid_t host_service_id;
0681 } __packed;
0682 
0683 /* Modify Channel parameters, cf. vmbus_send_modifychannel() */
0684 struct vmbus_channel_modifychannel {
0685     struct vmbus_channel_message_header header;
0686     u32 child_relid;
0687     u32 target_vp;
0688 } __packed;
0689 
0690 struct vmbus_channel_version_response {
0691     struct vmbus_channel_message_header header;
0692     u8 version_supported;
0693 
0694     u8 connection_state;
0695     u16 padding;
0696 
0697     /*
0698      * On new hosts that support VMBus protocol 5.0, we must use
0699      * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
0700      * and for subsequent messages, we must use the Message Connection ID
0701      * field in the host-returned Version Response Message.
0702      *
0703      * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
0704      */
0705     u32 msg_conn_id;
0706 } __packed;
0707 
0708 enum vmbus_channel_state {
0709     CHANNEL_OFFER_STATE,
0710     CHANNEL_OPENING_STATE,
0711     CHANNEL_OPEN_STATE,
0712     CHANNEL_OPENED_STATE,
0713 };
0714 
0715 /*
0716  * Represents each channel msg on the vmbus connection. This is a
0717  * variable-size data structure depending on the msg type itself.
0718  */
0719 struct vmbus_channel_msginfo {
0720     /* Bookkeeping stuff */
0721     struct list_head msglistentry;
0722 
0723     /* So far, this is only used to handle gpadl body message */
0724     struct list_head submsglist;
0725 
0726     /* Synchronize the request/response if needed */
0727     struct completion  waitevent;
0728     struct vmbus_channel *waiting_channel;
0729     union {
0730         struct vmbus_channel_version_supported version_supported;
0731         struct vmbus_channel_open_result open_result;
0732         struct vmbus_channel_gpadl_torndown gpadl_torndown;
0733         struct vmbus_channel_gpadl_created gpadl_created;
0734         struct vmbus_channel_version_response version_response;
0735         struct vmbus_channel_modifychannel_response modify_response;
0736     } response;
0737 
0738     u32 msgsize;
0739     /*
0740      * The channel message that goes out on the "wire".
0741      * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
0742      */
0743     unsigned char msg[];
0744 };
0745 
0746 struct vmbus_close_msg {
0747     struct vmbus_channel_msginfo info;
0748     struct vmbus_channel_close_channel msg;
0749 };
0750 
0751 /* Define connection identifier type. */
0752 union hv_connection_id {
0753     u32 asu32;
0754     struct {
0755         u32 id:24;
0756         u32 reserved:8;
0757     } u;
0758 };
0759 
0760 enum vmbus_device_type {
0761     HV_IDE = 0,
0762     HV_SCSI,
0763     HV_FC,
0764     HV_NIC,
0765     HV_ND,
0766     HV_PCIE,
0767     HV_FB,
0768     HV_KBD,
0769     HV_MOUSE,
0770     HV_KVP,
0771     HV_TS,
0772     HV_HB,
0773     HV_SHUTDOWN,
0774     HV_FCOPY,
0775     HV_BACKUP,
0776     HV_DM,
0777     HV_UNKNOWN,
0778 };
0779 
0780 /*
0781  * Provides request ids for VMBus. Encapsulates guest memory
0782  * addresses and stores the next available slot in req_arr
0783  * to generate new ids in constant time.
0784  */
0785 struct vmbus_requestor {
0786     u64 *req_arr;
0787     unsigned long *req_bitmap; /* is a given slot available? */
0788     u32 size;
0789     u64 next_request_id;
0790     spinlock_t req_lock; /* provides atomicity */
0791 };
0792 
0793 #define VMBUS_NO_RQSTOR U64_MAX
0794 #define VMBUS_RQST_ERROR (U64_MAX - 1)
0795 #define VMBUS_RQST_ADDR_ANY U64_MAX
0796 /* NetVSC-specific */
0797 #define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
0798 /* StorVSC-specific */
0799 #define VMBUS_RQST_INIT (U64_MAX - 2)
0800 #define VMBUS_RQST_RESET (U64_MAX - 3)
0801 
0802 struct vmbus_device {
0803     u16  dev_type;
0804     guid_t guid;
0805     bool perf_device;
0806     bool allowed_in_isolated;
0807 };
0808 
0809 #define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
0810 
0811 struct vmbus_gpadl {
0812     u32 gpadl_handle;
0813     u32 size;
0814     void *buffer;
0815 };
0816 
0817 struct vmbus_channel {
0818     struct list_head listentry;
0819 
0820     struct hv_device *device_obj;
0821 
0822     enum vmbus_channel_state state;
0823 
0824     struct vmbus_channel_offer_channel offermsg;
0825     /*
0826      * These are based on the OfferMsg.MonitorId.
0827      * Save it here for easy access.
0828      */
0829     u8 monitor_grp;
0830     u8 monitor_bit;
0831 
0832     bool rescind; /* got rescind msg */
0833     bool rescind_ref; /* got rescind msg, got channel reference */
0834     struct completion rescind_event;
0835 
0836     struct vmbus_gpadl ringbuffer_gpadlhandle;
0837 
0838     /* Allocated memory for ring buffer */
0839     struct page *ringbuffer_page;
0840     u32 ringbuffer_pagecount;
0841     u32 ringbuffer_send_offset;
0842     struct hv_ring_buffer_info outbound;    /* send to parent */
0843     struct hv_ring_buffer_info inbound; /* receive from parent */
0844 
0845     struct vmbus_close_msg close_msg;
0846 
0847     /* Statistics */
0848     u64 interrupts; /* Host to Guest interrupts */
0849     u64 sig_events; /* Guest to Host events */
0850 
0851     /*
0852      * Guest to host interrupts caused by the outbound ring buffer changing
0853      * from empty to not empty.
0854      */
0855     u64 intr_out_empty;
0856 
0857     /*
0858      * Indicates that a full outbound ring buffer was encountered. The flag
0859      * is set to true when a full outbound ring buffer is encountered and
0860      * set to false when a write to the outbound ring buffer is completed.
0861      */
0862     bool out_full_flag;
0863 
0864     /* Channel callback's invoked in softirq context */
0865     struct tasklet_struct callback_event;
0866     void (*onchannel_callback)(void *context);
0867     void *channel_callback_context;
0868 
0869     void (*change_target_cpu_callback)(struct vmbus_channel *channel,
0870             u32 old, u32 new);
0871 
0872     /*
0873      * Synchronize channel scheduling and channel removal; see the inline
0874      * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
0875      */
0876     spinlock_t sched_lock;
0877 
0878     /*
0879      * A channel can be marked for one of three modes of reading:
0880      *   BATCHED - callback called from tasklet and should read
0881      *            channel until empty. Interrupts from the host
0882      *            are masked while read is in progress (default).
0883      *   DIRECT - callback called from tasklet (softirq).
0884      *   ISR - callback called in interrupt context and must
0885      *         invoke its own deferred processing.
0886      *         Host interrupts are disabled and must be re-enabled
0887      *         when ring is empty.
0888      */
0889     enum hv_callback_mode {
0890         HV_CALL_BATCHED,
0891         HV_CALL_DIRECT,
0892         HV_CALL_ISR
0893     } callback_mode;
0894 
0895     bool is_dedicated_interrupt;
0896     u64 sig_event;
0897 
0898     /*
0899      * Starting with win8, this field will be used to specify the
0900      * target CPU on which to deliver the interrupt for the host
0901      * to guest communication.
0902      *
0903      * Prior to win8, incoming channel interrupts would only be
0904      * delivered on CPU 0. Setting this value to 0 would preserve
0905      * the earlier behavior.
0906      */
0907     u32 target_cpu;
0908     /*
0909      * Support for sub-channels. For high performance devices,
0910      * it will be useful to have multiple sub-channels to support
0911      * a scalable communication infrastructure with the host.
0912      * The support for sub-channels is implemented as an extension
0913      * to the current infrastructure.
0914      * The initial offer is considered the primary channel and this
0915      * offer message will indicate if the host supports sub-channels.
0916      * The guest is free to ask for sub-channels to be offered and can
0917      * open these sub-channels as a normal "primary" channel. However,
0918      * all sub-channels will have the same type and instance guids as the
0919      * primary channel. Requests sent on a given channel will result in a
0920      * response on the same channel.
0921      */
0922 
0923     /*
0924      * Sub-channel creation callback. This callback will be called in
0925      * process context when a sub-channel offer is received from the host.
0926      * The guest can open the sub-channel in the context of this callback.
0927      */
0928     void (*sc_creation_callback)(struct vmbus_channel *new_sc);
0929 
0930     /*
0931      * Channel rescind callback. Some channels (the hvsock ones), need to
0932      * register a callback which is invoked in vmbus_onoffer_rescind().
0933      */
0934     void (*chn_rescind_callback)(struct vmbus_channel *channel);
0935 
0936     /*
0937      * All Sub-channels of a primary channel are linked here.
0938      */
0939     struct list_head sc_list;
0940     /*
0941      * The primary channel this sub-channel belongs to.
0942      * This will be NULL for the primary channel.
0943      */
0944     struct vmbus_channel *primary_channel;
0945     /*
0946      * Support per-channel state for use by vmbus drivers.
0947      */
0948     void *per_channel_state;
0949 
0950     /*
0951      * Defer freeing channel until after all CPUs have
0952      * gone through a grace period.
0953      */
0954     struct rcu_head rcu;
0955 
0956     /*
0957      * For sysfs per-channel properties.
0958      */
0959     struct kobject          kobj;
0960 
0961     /*
0962      * For performance critical channels (storage, networking
0963      * etc.), Hyper-V has a mechanism to enhance the throughput
0964      * at the expense of latency:
0965      * When the host is to be signaled, we just set a bit in a shared page
0966      * and this bit will be inspected by the hypervisor within a certain
0967      * window and if the bit is set, the host will be signaled. The window
0968      * of time is the monitor latency - currently around 100 usecs. This
0969      * mechanism improves throughput by:
0970      *
0971      * A) Making the host more efficient - each time it wakes up,
0972      *    potentially it will process more packets. The
0973      *    monitor latency allows a batch to build up.
0974      * B) By deferring the hypercall to signal, we will also minimize
0975      *    the interrupts.
0976      *
0977      * Clearly, these optimizations improve throughput at the expense of
0978      * latency. Furthermore, since the channel is shared for both
0979      * control and data messages, control messages currently suffer
0980      * unnecessary latency adversely impacting performance and boot
0981      * time. To fix this issue, permit tagging the channel as being
0982      * in "low latency" mode. In this mode, we will bypass the monitor
0983      * mechanism.
0984      */
0985     bool low_latency;
0986 
0987     bool probe_done;
0988 
0989     /*
0990      * Cache the device ID here for easy access; this is useful, in
0991      * particular, in situations where the channel's device_obj has
0992      * not been allocated/initialized yet.
0993      */
0994     u16 device_id;
0995 
0996     /*
0997      * We must offload the handling of the primary/sub channels
0998      * from the single-threaded vmbus_connection.work_queue to
0999      * two different workqueues, otherwise we can block
1000      * vmbus_connection.work_queue and hang: see vmbus_process_offer().
1001      */
1002     struct work_struct add_channel_work;
1003 
1004     /*
1005      * Guest to host interrupts caused by the inbound ring buffer changing
1006      * from full to not full while a packet is waiting.
1007      */
1008     u64 intr_in_full;
1009 
1010     /*
1011      * The total number of write operations that encountered a full
1012      * outbound ring buffer.
1013      */
1014     u64 out_full_total;
1015 
1016     /*
1017      * The number of write operations that were the first to encounter a
1018      * full outbound ring buffer.
1019      */
1020     u64 out_full_first;
1021 
1022     /* enabling/disabling fuzz testing on the channel (default is false) */
1023     bool fuzz_testing_state;
1024 
1025     /*
1026      * Interrupt delay will delay the guest from emptying the ring buffer
1027      * for a specific amount of time. The delay is in microseconds and will
1028      * be between 1 and a maximum of 1000; the default is 0 (no delay).
1029      * The message delay will delay guest reading on a per-message basis
1030      * in microseconds, between 1 and 1000, with the default being 0
1031      * (no delay).
1032      */
1033     u32 fuzz_testing_interrupt_delay;
1034     u32 fuzz_testing_message_delay;
1035 
1036     /* callback to generate a request ID from a request address */
1037     u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
1038     /* callback to retrieve a request address from a request ID */
1039     u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
1040 
1041     /* request/transaction ids for VMBus */
1042     struct vmbus_requestor requestor;
1043     u32 rqstor_size;
1044 
1045     /* The max size of a packet on this channel */
1046     u32 max_pkt_size;
1047 };
1048 
1049 #define lock_requestor(channel, flags)                  \
1050 do {                                    \
1051     struct vmbus_requestor *rqstor = &(channel)->requestor;     \
1052                                     \
1053     spin_lock_irqsave(&rqstor->req_lock, flags);            \
1054 } while (0)
1055 
1056 static __always_inline void unlock_requestor(struct vmbus_channel *channel,
1057                          unsigned long flags)
1058 {
1059     struct vmbus_requestor *rqstor = &channel->requestor;
1060 
1061     spin_unlock_irqrestore(&rqstor->req_lock, flags);
1062 }
1063 
1064 u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
1065 u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1066                    u64 rqst_addr);
1067 u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1068                  u64 rqst_addr);
1069 u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
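
/*
 * Illustrative sketch (not part of the original header, names are
 * hypothetical): a driver that matches completions by guest address
 * typically pairs these helpers around a send/complete cycle:
 *
 *	id = vmbus_next_request_id(channel, (u64)(unsigned long)request);
 *	...send the packet using 'id' as the transaction id...
 *	addr = vmbus_request_addr(channel, desc->trans_id);
 *
 * The VMBUS_RQST_* values above mark channels without a requestor and
 * failed lookups, so callers must check for them before using the result.
 */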
1070 
1071 static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
1072 {
1073     return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
1074 }
1075 
1076 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
1077 {
1078     return is_hvsock_offer(&c->offermsg);
1079 }
1080 
1081 static inline bool is_sub_channel(const struct vmbus_channel *c)
1082 {
1083     return c->offermsg.offer.sub_channel_index != 0;
1084 }
1085 
1086 static inline void set_channel_read_mode(struct vmbus_channel *c,
1087                     enum hv_callback_mode mode)
1088 {
1089     c->callback_mode = mode;
1090 }
1091 
1092 static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
1093 {
1094     c->per_channel_state = s;
1095 }
1096 
1097 static inline void *get_per_channel_state(struct vmbus_channel *c)
1098 {
1099     return c->per_channel_state;
1100 }
1101 
1102 static inline void set_channel_pending_send_size(struct vmbus_channel *c,
1103                          u32 size)
1104 {
1105     unsigned long flags;
1106 
1107     if (size) {
1108         spin_lock_irqsave(&c->outbound.ring_lock, flags);
1109         ++c->out_full_total;
1110 
1111         if (!c->out_full_flag) {
1112             ++c->out_full_first;
1113             c->out_full_flag = true;
1114         }
1115         spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
1116     } else {
1117         c->out_full_flag = false;
1118     }
1119 
1120     c->outbound.ring_buffer->pending_send_sz = size;
1121 }
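
/*
 * Illustrative sketch (not part of the original header, the helper name
 * is hypothetical): the sender side of the pending_send_sz flow control
 * described for struct hv_ring_buffer above. When the outbound ring is
 * too full for the next packet, advertise how much space is needed so
 * the host will interrupt the guest once that much has been read; the
 * re-check after setting the value narrows the race with the host
 * reading in parallel.
 */
static inline bool example_outbound_has_space(struct vmbus_channel *c,
					      u32 needed)
{
	if (hv_get_bytes_to_write(&c->outbound) >= needed)
		return true;

	set_channel_pending_send_size(c, needed);
	return hv_get_bytes_to_write(&c->outbound) >= needed;
}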
1122 
1123 void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
1124 
1125 int vmbus_request_offers(void);
1126 
1127 /*
1128  * APIs for managing sub-channels.
1129  */
1130 
1131 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1132             void (*sc_cr_cb)(struct vmbus_channel *new_sc));
1133 
1134 void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1135         void (*chn_rescind_cb)(struct vmbus_channel *));
1136 
1137 /* The format must be the same as struct vmdata_gpa_direct */
1138 struct vmbus_channel_packet_page_buffer {
1139     u16 type;
1140     u16 dataoffset8;
1141     u16 length8;
1142     u16 flags;
1143     u64 transactionid;
1144     u32 reserved;
1145     u32 rangecount;
1146     struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
1147 } __packed;
1148 
1149 /* The format must be the same as struct vmdata_gpa_direct */
1150 struct vmbus_channel_packet_multipage_buffer {
1151     u16 type;
1152     u16 dataoffset8;
1153     u16 length8;
1154     u16 flags;
1155     u64 transactionid;
1156     u32 reserved;
1157     u32 rangecount;     /* Always 1 in this case */
1158     struct hv_multipage_buffer range;
1159 } __packed;
1160 
1161 /* The format must be the same as struct vmdata_gpa_direct */
1162 struct vmbus_packet_mpb_array {
1163     u16 type;
1164     u16 dataoffset8;
1165     u16 length8;
1166     u16 flags;
1167     u64 transactionid;
1168     u32 reserved;
1169     u32 rangecount;         /* Always 1 in this case */
1170     struct hv_mpb_array range;
1171 } __packed;
1172 
1173 int vmbus_alloc_ring(struct vmbus_channel *channel,
1174              u32 send_size, u32 recv_size);
1175 void vmbus_free_ring(struct vmbus_channel *channel);
1176 
1177 int vmbus_connect_ring(struct vmbus_channel *channel,
1178                void (*onchannel_callback)(void *context),
1179                void *context);
1180 int vmbus_disconnect_ring(struct vmbus_channel *channel);
1181 
1182 extern int vmbus_open(struct vmbus_channel *channel,
1183                 u32 send_ringbuffersize,
1184                 u32 recv_ringbuffersize,
1185                 void *userdata,
1186                 u32 userdatalen,
1187                 void (*onchannel_callback)(void *context),
1188                 void *context);
1189 
1190 extern void vmbus_close(struct vmbus_channel *channel);
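
/*
 * Illustrative sketch (not part of the original header, 'example_cb' and
 * the helper name are hypothetical): a typical probe path opens the
 * channel with page-aligned ring sizes and an interrupt callback, and
 * calls vmbus_close() on teardown.
 */
static inline int example_open_channel(struct vmbus_channel *chan,
				       void (*example_cb)(void *context))
{
	return vmbus_open(chan,
			  VMBUS_RING_SIZE(16 * PAGE_SIZE),	/* send ring */
			  VMBUS_RING_SIZE(16 * PAGE_SIZE),	/* recv ring */
			  NULL, 0,				/* no user data */
			  example_cb, chan);
}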
1191 
1192 extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
1193                   void *buffer,
1194                   u32 bufferLen,
1195                   u64 requestid,
1196                   u64 *trans_id,
1197                   enum vmbus_packet_type type,
1198                   u32 flags);
1199 extern int vmbus_sendpacket(struct vmbus_channel *channel,
1200                   void *buffer,
1201                   u32 bufferLen,
1202                   u64 requestid,
1203                   enum vmbus_packet_type type,
1204                   u32 flags);
1205 
1206 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1207                         struct hv_page_buffer pagebuffers[],
1208                         u32 pagecount,
1209                         void *buffer,
1210                         u32 bufferlen,
1211                         u64 requestid);
1212 
1213 extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
1214                      struct vmbus_packet_mpb_array *mpb,
1215                      u32 desc_size,
1216                      void *buffer,
1217                      u32 bufferlen,
1218                      u64 requestid);
1219 
1220 extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
1221                       void *kbuffer,
1222                       u32 size,
1223                       struct vmbus_gpadl *gpadl);
1224 
1225 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
1226                      struct vmbus_gpadl *gpadl);
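
/*
 * Illustrative sketch (not part of the original header, the helper name
 * is hypothetical): the usual pairing of the two calls above. 'kbuffer'
 * is assumed to be page-allocated; on success gpadl->gpadl_handle
 * identifies the mapping in later messages to the host.
 */
static inline int example_map_and_unmap_gpadl(struct vmbus_channel *chan,
					      void *kbuffer, u32 size,
					      struct vmbus_gpadl *gpadl)
{
	int ret;

	ret = vmbus_establish_gpadl(chan, kbuffer, size, gpadl);
	if (ret)
		return ret;

	/* ...hand gpadl->gpadl_handle to the device and use the buffer... */

	return vmbus_teardown_gpadl(chan, gpadl);
}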
1227 
1228 void vmbus_reset_channel_cb(struct vmbus_channel *channel);
1229 
1230 extern int vmbus_recvpacket(struct vmbus_channel *channel,
1231                   void *buffer,
1232                   u32 bufferlen,
1233                   u32 *buffer_actual_len,
1234                   u64 *requestid);
1235 
1236 extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1237                      void *buffer,
1238                      u32 bufferlen,
1239                      u32 *buffer_actual_len,
1240                      u64 *requestid);
1241 
1242 
1243 extern void vmbus_ontimer(unsigned long data);
1244 
1245 /* Base driver object */
1246 struct hv_driver {
1247     const char *name;
1248 
1249     /*
1250      * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
1251      * channel flag, actually doesn't mean a synthetic device because the
1252      * offer's if_type/if_instance can change for every new hvsock
1253      * connection.
1254      *
1255      * However, to facilitate the notification of new-offer/rescind-offer
1256      * from vmbus driver to hvsock driver, we can handle hvsock offer as
1257      * a special vmbus device, and hence we need the below flag to
1258      * indicate if the driver is the hvsock driver or not: we need to
1259      * specially treat the hvsock offer & driver in vmbus_match().
1260      */
1261     bool hvsock;
1262 
1263     /* the device type supported by this driver */
1264     guid_t dev_type;
1265     const struct hv_vmbus_device_id *id_table;
1266 
1267     struct device_driver driver;
1268 
1269     /* dynamic device GUID's */
1270     struct  {
1271         spinlock_t lock;
1272         struct list_head list;
1273     } dynids;
1274 
1275     int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
1276     int (*remove)(struct hv_device *);
1277     void (*shutdown)(struct hv_device *);
1278 
1279     int (*suspend)(struct hv_device *);
1280     int (*resume)(struct hv_device *);
1281 
1282 };
1283 
1284 /* Base device object */
1285 struct hv_device {
1286     /* the device type id of this device */
1287     guid_t dev_type;
1288 
1289     /* the device instance id of this device */
1290     guid_t dev_instance;
1291     u16 vendor_id;
1292     u16 device_id;
1293 
1294     struct device device;
1295     /*
1296      * Driver name to force a match.  Do not set directly, because core
1297      * frees it.  Use driver_set_override() to set or clear it.
1298      */
1299     const char *driver_override;
1300 
1301     struct vmbus_channel *channel;
1302     struct kset      *channels_kset;
1303     struct device_dma_parameters dma_parms;
1304     u64 dma_mask;
1305 
1306     /* placeholder to keep track of the dir for hv device in debugfs */
1307     struct dentry *debug_dir;
1308 
1309 };
1310 
1311 
1312 static inline struct hv_device *device_to_hv_device(struct device *d)
1313 {
1314     return container_of(d, struct hv_device, device);
1315 }
1316 
1317 static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
1318 {
1319     return container_of(d, struct hv_driver, driver);
1320 }
1321 
1322 static inline void hv_set_drvdata(struct hv_device *dev, void *data)
1323 {
1324     dev_set_drvdata(&dev->device, data);
1325 }
1326 
1327 static inline void *hv_get_drvdata(struct hv_device *dev)
1328 {
1329     return dev_get_drvdata(&dev->device);
1330 }
1331 
1332 struct hv_ring_buffer_debug_info {
1333     u32 current_interrupt_mask;
1334     u32 current_read_index;
1335     u32 current_write_index;
1336     u32 bytes_avail_toread;
1337     u32 bytes_avail_towrite;
1338 };
1339 
1340 
1341 int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
1342                 struct hv_ring_buffer_debug_info *debug_info);
1343 
1344 /* Vmbus interface */
1345 #define vmbus_driver_register(driver)   \
1346     __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1347 int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1348                      struct module *owner,
1349                      const char *mod_name);
1350 void vmbus_driver_unregister(struct hv_driver *hv_driver);
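
/*
 * Illustrative sketch (not part of the original header, all 'example_*'
 * names are hypothetical): a minimal VMBus driver wires an id table
 * built from the GUID macros below into struct hv_driver and registers
 * it:
 *
 *	static const struct hv_vmbus_device_id example_id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *
 *	static struct hv_driver example_drv = {
 *		.name		= "example_vmbus_drv",
 *		.id_table	= example_id_table,
 *		.probe		= example_probe,
 *		.remove		= example_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&example_drv);
 */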
1351 
1352 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1353 
1354 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1355             resource_size_t min, resource_size_t max,
1356             resource_size_t size, resource_size_t align,
1357             bool fb_overlap_ok);
1358 void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1359 
1360 /*
1361  * GUID definitions of various offer types - services offered to the guest.
1362  */
1363 
1364 /*
1365  * Network GUID
1366  * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1367  */
1368 #define HV_NIC_GUID \
1369     .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1370               0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1371 
1372 /*
1373  * IDE GUID
1374  * {32412632-86cb-44a2-9b5c-50d1417354f5}
1375  */
1376 #define HV_IDE_GUID \
1377     .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1378               0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1379 
1380 /*
1381  * SCSI GUID
1382  * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1383  */
1384 #define HV_SCSI_GUID \
1385     .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1386               0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1387 
1388 /*
1389  * Shutdown GUID
1390  * {0e0b6031-5213-4934-818b-38d90ced39db}
1391  */
1392 #define HV_SHUTDOWN_GUID \
1393     .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1394               0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1395 
1396 /*
1397  * Time Synch GUID
1398  * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1399  */
1400 #define HV_TS_GUID \
1401     .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1402               0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1403 
1404 /*
1405  * Heartbeat GUID
1406  * {57164f39-9115-4e78-ab55-382f3bd5422d}
1407  */
1408 #define HV_HEART_BEAT_GUID \
1409     .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1410               0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1411 
1412 /*
1413  * KVP GUID
1414  * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1415  */
1416 #define HV_KVP_GUID \
1417     .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1418               0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1419 
1420 /*
1421  * Dynamic memory GUID
1422  * {525074dc-8985-46e2-8057-a307dc18a502}
1423  */
1424 #define HV_DM_GUID \
1425     .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1426               0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1427 
1428 /*
1429  * Mouse GUID
1430  * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1431  */
1432 #define HV_MOUSE_GUID \
1433     .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1434               0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1435 
1436 /*
1437  * Keyboard GUID
1438  * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
1439  */
1440 #define HV_KBD_GUID \
1441     .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1442               0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1443 
1444 /*
1445  * VSS (Backup/Restore) GUID
1446  */
1447 #define HV_VSS_GUID \
1448     .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1449               0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1450 /*
1451  * Synthetic Video GUID
1452  * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
1453  */
1454 #define HV_SYNTHVID_GUID \
1455     .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1456               0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1457 
1458 /*
1459  * Synthetic FC GUID
1460  * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
1461  */
1462 #define HV_SYNTHFC_GUID \
1463     .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1464               0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1465 
1466 /*
1467  * Guest File Copy Service
1468  * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
1469  */
1470 
1471 #define HV_FCOPY_GUID \
1472     .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1473               0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1474 
1475 /*
1476  * NetworkDirect. This is the guest RDMA service.
1477  * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
1478  */
1479 #define HV_ND_GUID \
1480     .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1481               0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1482 
1483 /*
1484  * PCI Express Pass Through
1485  * {44C4F61D-4444-4400-9D52-802E27EDE19F}
1486  */
1487 
1488 #define HV_PCIE_GUID \
1489     .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1490               0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1491 
1492 /*
1493  * Linux doesn't support these 4 devices: the first two are for
1494  * Automatic Virtual Machine Activation, the third is for
1495  * Remote Desktop Virtualization, and the fourth is Initial
1496  * Machine Configuration (IMC) used only by Windows guests.
1497  * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
1498  * {3375baf4-9e15-4b30-b765-67acb10d607b}
1499  * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
1500  * {c376c1c3-d276-48d2-90a9-c04748072c60}
1501  */
1502 
1503 #define HV_AVMA1_GUID \
1504     .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
1505               0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
1506 
1507 #define HV_AVMA2_GUID \
1508     .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
1509               0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
1510 
1511 #define HV_RDV_GUID \
1512     .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
1513               0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1514 
1515 #define HV_IMC_GUID \
1516     .guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
1517               0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
1518 
1519 /*
1520  * Common header for Hyper-V ICs
1521  */
1522 
1523 #define ICMSGTYPE_NEGOTIATE     0
1524 #define ICMSGTYPE_HEARTBEAT     1
1525 #define ICMSGTYPE_KVPEXCHANGE       2
1526 #define ICMSGTYPE_SHUTDOWN      3
1527 #define ICMSGTYPE_TIMESYNC      4
1528 #define ICMSGTYPE_VSS           5
1529 #define ICMSGTYPE_FCOPY         7
1530 
1531 #define ICMSGHDRFLAG_TRANSACTION    1
1532 #define ICMSGHDRFLAG_REQUEST        2
1533 #define ICMSGHDRFLAG_RESPONSE       4
1534 
1535 
1536 /*
1537  * While we want to handle util services as regular devices,
1538  * there is only one instance of each of these services; so
1539  * we statically allocate the service specific state.
1540  */
1541 
1542 struct hv_util_service {
1543     u8 *recv_buffer;
1544     void *channel;
1545     void (*util_cb)(void *);
1546     int (*util_init)(struct hv_util_service *);
1547     void (*util_deinit)(void);
1548     int (*util_pre_suspend)(void);
1549     int (*util_pre_resume)(void);
1550 };
1551 
1552 struct vmbuspipe_hdr {
1553     u32 flags;
1554     u32 msgsize;
1555 } __packed;
1556 
1557 struct ic_version {
1558     u16 major;
1559     u16 minor;
1560 } __packed;
1561 
1562 struct icmsg_hdr {
1563     struct ic_version icverframe;
1564     u16 icmsgtype;
1565     struct ic_version icvermsg;
1566     u16 icmsgsize;
1567     u32 status;
1568     u8 ictransaction_id;
1569     u8 icflags;
1570     u8 reserved[2];
1571 } __packed;
1572 
1573 #define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
1574 #define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
1575 #define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
1576     (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
1577      (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
1578 
1579 struct icmsg_negotiate {
1580     u16 icframe_vercnt;
1581     u16 icmsg_vercnt;
1582     u32 reserved;
1583     struct ic_version icversion_data[]; /* any size array */
1584 } __packed;
1585 
1586 struct shutdown_msg_data {
1587     u32 reason_code;
1588     u32 timeout_seconds;
1589     u32 flags;
1590     u8  display_message[2048];
1591 } __packed;
1592 
1593 struct heartbeat_msg_data {
1594     u64 seq_num;
1595     u32 reserved[8];
1596 } __packed;
1597 
1598 /* Time Sync IC defs */
1599 #define ICTIMESYNCFLAG_PROBE    0
1600 #define ICTIMESYNCFLAG_SYNC 1
1601 #define ICTIMESYNCFLAG_SAMPLE   2
1602 
1603 #ifdef __x86_64__
1604 #define WLTIMEDELTA 116444736000000000L /* in 100ns unit */
1605 #else
1606 #define WLTIMEDELTA 116444736000000000LL
1607 #endif
1608 
1609 struct ictimesync_data {
1610     u64 parenttime;
1611     u64 childtime;
1612     u64 roundtriptime;
1613     u8 flags;
1614 } __packed;
1615 
1616 struct ictimesync_ref_data {
1617     u64 parenttime;
1618     u64 vmreferencetime;
1619     u8 flags;
1620     char leapflags;
1621     char stratum;
1622     u8 reserved[3];
1623 } __packed;
1624 
1625 struct hyperv_service_callback {
1626     u8 msg_type;
1627     char *log_msg;
1628     guid_t data;
1629     struct vmbus_channel *channel;
1630     void (*callback)(void *context);
1631 };
1632 
1633 struct hv_dma_range {
1634     dma_addr_t dma;
1635     u32 mapping_size;
1636 };
1637 
1638 #define MAX_SRV_VER 0x7ffffff
1639 extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
1640                 const int *fw_version, int fw_vercnt,
1641                 const int *srv_version, int srv_vercnt,
1642                 int *nego_fw_version, int *nego_srv_version);
1643 
1644 void hv_process_channel_removal(struct vmbus_channel *channel);
1645 
1646 void vmbus_setevent(struct vmbus_channel *channel);
1647 /*
1648  * Negotiated version with the Host.
1649  */
1650 
1651 extern __u32 vmbus_proto_version;
1652 
1653 int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
1654                   const guid_t *shv_host_servie_id);
1655 int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
1656 void vmbus_set_event(struct vmbus_channel *channel);
1657 
1658 /* Get the start of the ring buffer. */
1659 static inline void *
1660 hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
1661 {
1662     return ring_info->ring_buffer->buffer;
1663 }
1664 
1665 /*
1666  * Mask off host interrupt callback notifications
1667  */
1668 static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
1669 {
1670     rbi->ring_buffer->interrupt_mask = 1;
1671 
1672     /* make sure mask update is not reordered */
1673     virt_mb();
1674 }
1675 
1676 /*
1677  * Re-enable host callback and return number of outstanding bytes
1678  */
1679 static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
1680 {
1681 
1682     rbi->ring_buffer->interrupt_mask = 0;
1683 
1684     /* make sure mask update is not reordered */
1685     virt_mb();
1686 
1687     /*
1688      * Now check to see if the ring buffer is still empty.
1689      * If it is not, we raced and we need to process new
1690      * incoming messages.
1691      */
1692     return hv_get_bytes_to_read(rbi);
1693 }
1694 
1695 /*
1696  * An API to support in-place processing of incoming VMBUS packets.
1697  */
1698 
1699 /* Get data payload associated with descriptor */
1700 static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1701 {
1702     return (void *)((unsigned long)desc + (desc->offset8 << 3));
1703 }
1704 
1705 /* Get data size associated with descriptor */
1706 static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1707 {
1708     return (desc->len8 << 3) - (desc->offset8 << 3);
1709 }
1710 
1711 /* Get packet length associated with descriptor */
1712 static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
1713 {
1714     return desc->len8 << 3;
1715 }
1716 
1717 struct vmpacket_descriptor *
1718 hv_pkt_iter_first(struct vmbus_channel *channel);
1719 
1720 struct vmpacket_descriptor *
1721 __hv_pkt_iter_next(struct vmbus_channel *channel,
1722            const struct vmpacket_descriptor *pkt);
1723 
1724 void hv_pkt_iter_close(struct vmbus_channel *channel);
1725 
1726 static inline struct vmpacket_descriptor *
1727 hv_pkt_iter_next(struct vmbus_channel *channel,
1728          const struct vmpacket_descriptor *pkt)
1729 {
1730     struct vmpacket_descriptor *nxt;
1731 
1732     nxt = __hv_pkt_iter_next(channel, pkt);
1733     if (!nxt)
1734         hv_pkt_iter_close(channel);
1735 
1736     return nxt;
1737 }
1738 
1739 #define foreach_vmbus_pkt(pkt, channel) \
1740     for (pkt = hv_pkt_iter_first(channel); pkt; \
1741         pkt = hv_pkt_iter_next(channel, pkt))
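
/*
 * Illustrative sketch (not part of the original header, names are
 * hypothetical): a minimal onchannel_callback that drains the inbound
 * ring in place using the iterator above; 'context' is whatever was
 * registered with vmbus_open()/vmbus_connect_ring(), assumed here to be
 * the channel itself.
 */
static inline void example_onchannel_callback(void *context)
{
	struct vmbus_channel *chan = context;
	const struct vmpacket_descriptor *desc;

	foreach_vmbus_pkt(desc, chan) {
		/* The payload begins offset8 * 8 bytes into the packet. */
		void *data = hv_pkt_data(desc);
		u32 len = hv_pkt_datalen(desc);

		/* ...hand data/len to the protocol-specific handler... */
		(void)data;
		(void)len;
	}
}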
1742 
1743 /*
1744  * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
1745  * sends requests to read and write blocks. Each block must be 128 bytes or
1746  * smaller. Optionally, the VF driver can register a callback function which
1747  * will be invoked when the host says that one or more of the first 64 block
1748  * IDs is "invalid" which means that the VF driver should reread them.
1749  */
1750 #define HV_CONFIG_BLOCK_SIZE_MAX 128
1751 
1752 int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
1753             unsigned int block_id, unsigned int *bytes_returned);
1754 int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
1755              unsigned int block_id);
1756 int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
1757                 void (*block_invalidate)(void *context,
1758                              u64 block_mask));
1759 
1760 struct hyperv_pci_block_ops {
1761     int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
1762               unsigned int block_id, unsigned int *bytes_returned);
1763     int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
1764                unsigned int block_id);
1765     int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
1766                   void (*block_invalidate)(void *context,
1767                                u64 block_mask));
1768 };
1769 
1770 extern struct hyperv_pci_block_ops hvpci_block_ops;
1771 
1772 static inline unsigned long virt_to_hvpfn(void *addr)
1773 {
1774     phys_addr_t paddr;
1775 
1776     if (is_vmalloc_addr(addr))
1777         paddr = page_to_phys(vmalloc_to_page(addr)) +
1778                      offset_in_page(addr);
1779     else
1780         paddr = __pa(addr);
1781 
1782     return  paddr >> HV_HYP_PAGE_SHIFT;
1783 }
1784 
1785 #define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
1786 #define offset_in_hvpage(ptr)   ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
1787 #define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
1788 #define HVPFN_DOWN(x)   ((x) >> HV_HYP_PAGE_SHIFT)
1789 #define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
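
/*
 * Illustrative sketch (not part of the original header, the helper name
 * is hypothetical): the usual way these helpers combine when sizing a
 * PFN list - the number of Hyper-V (4K) pages needed to cover 'len'
 * bytes starting at 'addr'.
 */
static inline u32 example_hvpfn_count(void *addr, u32 len)
{
	return HVPFN_UP(offset_in_hvpage(addr) + len);
}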
1790 
1791 #endif /* _HYPERV_H */