0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #ifndef _HYPERV_H
0013 #define _HYPERV_H
0014
0015 #include <uapi/linux/hyperv.h>
0016
0017 #include <linux/mm.h>
0018 #include <linux/types.h>
0019 #include <linux/scatterlist.h>
0020 #include <linux/list.h>
0021 #include <linux/timer.h>
0022 #include <linux/completion.h>
0023 #include <linux/device.h>
0024 #include <linux/mod_devicetable.h>
0025 #include <linux/interrupt.h>
0026 #include <linux/reciprocal_div.h>
0027 #include <asm/hyperv-tlfs.h>
0028
0029 #define MAX_PAGE_BUFFER_COUNT 32
0030 #define MAX_MULTIPAGE_BUFFER_COUNT 32
0031
0032 #pragma pack(push, 1)
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
/* Types of GPADL, decided by the guest when the GPADL is created. */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,
	HV_GPADL_RING
};

/* Single-page buffer: offset/len locate the data within the page 'pfn'. */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer with a fixed-size pfn array. */
struct hv_multipage_buffer {
	/* Length and offset determine the # of pfns in the array. */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the number of entries in pfn_array is
 * determined by len and offset (C99 flexible array member).
 */
struct hv_mpb_array {
	/* Length and offset determine the # of pfns in the array. */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};
0102
0103
0104 #define MAX_PAGE_BUFFER_PACKET (0x18 + \
0105 (sizeof(struct hv_page_buffer) * \
0106 MAX_PAGE_BUFFER_COUNT))
0107 #define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
0108 sizeof(struct hv_multipage_buffer))
0109
0110
0111 #pragma pack(pop)
0112
/* Ring buffer control page, shared between the guest and the host. */
struct hv_ring_buffer {
	/* Offset in bytes from the start of the ring data below. */
	u32 write_index;

	/* Offset in bytes from the start of the ring data below. */
	u32 read_index;

	/* Non-zero while the reader does not want a new-data interrupt
	 * (see hv_begin_read()/hv_end_read()). */
	u32 interrupt_mask;

	/*
	 * If the writer finds less than pending_send_sz bytes free, the
	 * reader should signal the writer once at least that much space
	 * is available again.  Zero disables the feature; availability is
	 * advertised via feature_bits.feat_pending_send_sz below.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad the control region to a full page; 68 bytes are used above
	 * (3*4 indices/mask + 4 pending_send_sz + 48 reserved1 + 4 bits). */
	u8 reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here.
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;
0166
0167
0168 #define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
0169 (payload_sz))
0170
/* Guest-side bookkeeping for one direction of a channel's ring buffer. */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Includes the shared header page(s) */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size: data area only */
	u32 priv_read_index;
	/*
	 * Prevents the ring buffer from being freed while it is being
	 * accessed.
	 */
	struct mutex ring_buffer_mutex;

	/* Buffer that holds a copy of an incoming host packet */
	void *pkt_buffer;
	u32 pkt_buffer_size;
};
0189
0190
0191 static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
0192 {
0193 u32 read_loc, write_loc, dsize, read;
0194
0195 dsize = rbi->ring_datasize;
0196 read_loc = rbi->ring_buffer->read_index;
0197 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
0198
0199 read = write_loc >= read_loc ? (write_loc - read_loc) :
0200 (dsize - read_loc) + write_loc;
0201
0202 return read;
0203 }
0204
0205 static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
0206 {
0207 u32 read_loc, write_loc, dsize, write;
0208
0209 dsize = rbi->ring_datasize;
0210 read_loc = READ_ONCE(rbi->ring_buffer->read_index);
0211 write_loc = rbi->ring_buffer->write_index;
0212
0213 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
0214 read_loc - write_loc;
0215 return write;
0216 }
0217
0218 static inline u32 hv_get_avail_to_write_percent(
0219 const struct hv_ring_buffer_info *rbi)
0220 {
0221 u32 avail_write = hv_get_bytes_to_write(rbi);
0222
0223 return reciprocal_divide(
0224 (avail_write << 3) + (avail_write << 1),
0225 rbi->ring_size_div10_reciprocal);
0226 }
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248 #define VERSION_WS2008 ((0 << 16) | (13))
0249 #define VERSION_WIN7 ((1 << 16) | (1))
0250 #define VERSION_WIN8 ((2 << 16) | (4))
0251 #define VERSION_WIN8_1 ((3 << 16) | (0))
0252 #define VERSION_WIN10 ((4 << 16) | (0))
0253 #define VERSION_WIN10_V4_1 ((4 << 16) | (1))
0254 #define VERSION_WIN10_V5 ((5 << 16) | (0))
0255 #define VERSION_WIN10_V5_1 ((5 << 16) | (1))
0256 #define VERSION_WIN10_V5_2 ((5 << 16) | (2))
0257 #define VERSION_WIN10_V5_3 ((5 << 16) | (3))
0258
0259
0260 #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
0261
0262
0263 #define VMBUS_PIPE_TYPE_BYTE 0x00000000
0264 #define VMBUS_PIPE_TYPE_MESSAGE 0x00000004
0265
0266
0267 #define MAX_USER_DEFINED_BYTES 120
0268
0269
0270 #define MAX_PIPE_USER_DEFINED_BYTES 116
0271
0272
0273
0274
0275
/* Body of a channel offer as sent by the host. */
struct vmbus_channel_offer {
	guid_t if_type;		/* device/interface class GUID */
	guid_t if_instance;	/* unique GUID of this instance */

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;		/* VMBUS_CHANNEL_* flags below */
	u16 mmio_megabytes;	/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: the user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol,
		 * which is implemented on top of standard user-defined data.
		 * Pipe clients have MAX_PIPE_USER_DEFINED_BYTES left for
		 * their own use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;
0316
0317
0318 #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
0319 #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
0320 #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
0321 #define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
0322 #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
0323 #define VMBUS_CHANNEL_PARENT_OFFER 0x200
0324 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
0325 #define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
0326
/* Descriptor that prefixes every packet placed in a ring buffer. */
struct vmpacket_descriptor {
	u16 type;	/* enum vmbus_packet_type */
	u16 offset8;	/* offset of the payload, in 8-byte units */
	u16 len8;	/* total packet length, in 8-byte units */
	u16 flags;
	u64 trans_id;	/* correlates a request with its completion */
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;	/* presumably the previous packet's
					 * start offset -- confirm vs users */
	struct vmpacket_descriptor descriptor;
} __packed;

/* One (count, offset) range within a transfer page set. */
struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	/* range_cnt entries actually follow (pre-C99 [1] trailing-array idiom;
	 * do not change to [] -- callers may rely on sizeof). */
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;
0366
0367
0368
0369
0370
/*
 * This structure defines a range in guest physical space that can be made
 * to look virtually contiguous.  The number of entries in pfn_array is
 * implied by byte_count and byte_offset.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * Establish-GPADL packet: contains the handle by which this GPADL will be
 * known and the set of GPA ranges associated with it.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	/* Pre-C99 trailing-array idiom; range_cnt entries follow. */
	struct gpa_range range[1];
} __packed;

/*
 * Teardown-GPADL packet: indicates that the GPADL handle will never be
 * referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* pad to an 8-byte boundary */
} __packed;

/*
 * GPA-Direct packet: carries a set of GPA ranges in addition to commands
 * and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	/* Pre-C99 trailing-array idiom; range_cnt entries follow. */
	struct gpa_range range[1];
} __packed;

/* Additional-data packet, continuing a larger transfer. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

/* Union sized to hold the largest possible packet header. */
union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};
0430
/*
 * Accessors for the payload that follows a struct vmpacket_descriptor.
 * offset8/len8 are in units of 8 bytes from the start of the packet.
 *
 * Fixed: the casts were missing the pointer declarator
 * ("(struct vmpacket_descriptor)__packet" is an invalid struct-value cast),
 * and VMPACKET_TRANSFER_MODE referenced a non-existent "struct IMPACT",
 * so none of these macros could have compiled when expanded.
 */
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)(__packet)) +	\
	 ((struct vmpacket_descriptor *)(__packet))->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)(__packet))->len8 -	\
	  ((struct vmpacket_descriptor *)(__packet))->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)(__packet))->type)
0441
/* Packet types carried in vmpacket_descriptor.type on the ring buffer. */
enum vmbus_packet_type {
	VM_PKT_INVALID = 0x0,
	VM_PKT_SYNCH = 0x1,
	VM_PKT_ADD_XFER_PAGESET = 0x2,
	VM_PKT_RM_XFER_PAGESET = 0x3,
	VM_PKT_ESTABLISH_GPADL = 0x4,
	VM_PKT_TEARDOWN_GPADL = 0x5,
	VM_PKT_DATA_INBAND = 0x6,
	VM_PKT_DATA_USING_XFER_PAGES = 0x7,
	VM_PKT_DATA_USING_GPADL = 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
	VM_PKT_CANCEL_REQUEST = 0xa,
	VM_PKT_COMP = 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
	VM_PKT_ADDITIONAL_DATA = 0xd
};
0458
0459 #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
0460
0461
0462
/*
 * Channel message types exchanged with the host over the VMBus control
 * path (vmbus_channel_message_header.msgtype).  Values are part of the
 * protocol and must not be renumbered.
 */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT	=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY		=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_MODIFYCHANNEL		= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_MODIFYCHANNEL_RESPONSE	= 24,
	CHANNELMSG_COUNT
};
0491
0492
0493 #define INVALID_RELID U32_MAX
0494
/* All channel messages begin with this header identifying the msgtype. */
struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;
0511
0512
/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt
	 * for the host to guest communication.
	 * Before win8, all incoming channel interrupts are only
	 * delivered on cpu 0. Setting this value to 0 preserves the
	 * earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by ringbuffer_gpadlhandle.  The downstream ring
	 * buffer follows at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;
0587
0588
/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Modify Channel Result parameters */
struct vmbus_channel_modifychannel_response {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
0608
0609
0610 #define GPADL_TYPE_RING_BUFFER 1
0611 #define GPADL_TYPE_SERVER_SAVE_AREA 2
0612 #define GPADL_TYPE_TRANSACTION 8
0613
0614
0615
0616
0617
0618
0619
/*
 * The number of PFNs in a GPADL message is determined by the number of
 * pages that would be spanned by byte_count and byte_offset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more (vmbus_channel_gpadl_body).
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

/* Host's acknowledgment that GPADL creation completed. */
struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

/* Guest's request to tear down a GPADL. */
struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

/* Host's acknowledgment that the GPADL has been torn down. */
struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

/* Notifies that a child relid has been released. */
struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
0659
/* Guest's initial message negotiating the VMBus protocol version. */
struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu;		/* The VCPU the host should respond to */
	union {
		u64 interrupt_page;	/* Used pre-VERSION_WIN10_V5 */
		struct {
			u8 msg_sint;	/* VERSION_WIN10_V5 and later */
			u8 padding1[3];
			u32 padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's request to connect to the host's listener. */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	/* The channel whose target "virtual processor" is being changed. */
	u32 child_relid;
	u32 target_vp;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, the host returns
	 * the message connection ID to be used for subsequent messages in
	 * msg_conn_id; older hosts leave it unused.
	 */
	u32 msg_conn_id;
} __packed;
0707
/* Lifecycle state of a channel, from receipt of the offer to fully open. */
enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};
0714
0715
0716
0717
0718
/*
 * Represents each channel msg on the vmbus connection.  This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
		struct vmbus_channel_modifychannel_response modify_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".  It will contain
	 * at minimum the vmbus_channel_message_header.
	 */
	unsigned char msg[];
};

/* A close message together with its bookkeeping info. */
struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};
0759
/* Well-known VMBus device classes. */
enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};
0779
0780
0781
0782
0783
0784
/*
 * Maps guest-chosen request IDs (handed to the host as trans_id) back to
 * the guest address of the request, so guest addresses are never exposed
 * on the wire and completions can be validated.
 */
struct vmbus_requestor {
	u64 *req_arr;			/* request ID -> guest address */
	unsigned long *req_bitmap;	/* is a given slot available? */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock;		/* provides atomicity */
};

#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
#define VMBUS_RQST_ADDR_ANY U64_MAX
/* NetVSC-specific */
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
/* Hyper-V socket sentinels; NOTE(review): VMBUS_RQST_INIT intentionally(?)
 * shares the value of VMBUS_RQST_ID_NO_RESPONSE -- the two appear to be
 * used by different drivers; confirm. */
#define VMBUS_RQST_INIT (U64_MAX - 2)
#define VMBUS_RQST_RESET (U64_MAX - 3)

/* Static per-class attributes of a VMBus device, keyed by its GUID. */
struct vmbus_device {
	u16 dev_type;
	guid_t guid;
	bool perf_device;	/* performance-critical device class */
	bool allowed_in_isolated; /* permitted in an isolated (CoCo) VM */
};

#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096

/* A GPADL handle together with the guest buffer it describes. */
struct vmbus_gpadl {
	u32 gpadl_handle;
	u32 size;
	void *buffer;
};
0816
/* Represents a single VMBus channel (primary or sub-channel). */
struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	/* The channel offer as received from the host. */
	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the offermsg's monitorid.
	 * Saved here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	bool rescind_ref; /* NOTE(review): presumably an extra reference is
			   * held across rescind processing -- confirm */
	struct completion rescind_event;

	struct vmbus_gpadl ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* Outbound channel */
	struct hv_ring_buffer_info inbound;	/* Inbound channel */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;	/* Host to Guest interrupts */
	u64 sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer
	 * changing from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered.  Set
	 * when the ring fills and cleared once space is available again
	 * (see set_channel_pending_send_size()).
	 */
	bool out_full_flag;

	/* Channel callback is invoked in this tasklet (softirq) context. */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the
	 * inline comments in vmbus_chan_sched() and
	 * vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * How the channel callback is dispatched on incoming interrupts:
	 * BATCHED masks host interrupts while draining (the default),
	 * DIRECT and ISR invoke the callback without batching.
	 * NOTE(review): exact dispatch semantics are implemented in
	 * vmbus_chan_sched() -- confirm there.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify the
	 * target CPU on which to deliver the interrupt for the host
	 * to guest communication.
	 * Before win8, all channel interrupts are delivered on CPU 0.
	 */
	u32 target_cpu;

	/*
	 * Sub-channel creation callback: invoked when a sub-channel offer
	 * is received for an existing channel.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback.  Some channels (the hvsock ones) need
	 * a callback invoked when the offer is rescinded.
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/* All sub-channels of a primary channel are linked here. */
	struct list_head sc_list;

	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;

	/* Support per-channel state for use by vmbus drivers. */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all cpu's have finished using
	 * it (RCU).
	 */
	struct rcu_head rcu;

	/* For sysfs per-channel properties. */
	struct kobject kobj;

	/* Opt into lower-latency (non-batched) handling for this channel. */
	bool low_latency;

	/* Set once the driver's probe of the device has completed. */
	bool probe_done;

	/* Cached device type id for this channel's device. */
	u16 device_id;

	/* Work item for processing a newly offered (sub-)channel. */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer
	 * changing from full to not full.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter
	 * a full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default false) */
	bool fuzz_testing_state;

	/*
	 * Artificial delays injected while fuzz testing is enabled:
	 * before servicing an interrupt and before delivering a message.
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* callback to generate a request ID from a request address */
	u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
	/* callback to retrieve a request address from a request ID */
	u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
	u32 rqstor_size;

	/* The max size of a packet on this channel */
	u32 max_pkt_size;
};
1048
/*
 * Acquire the channel's requestor lock, disabling interrupts.  This must
 * be a macro (not an inline function) because 'flags' is written by
 * spin_lock_irqsave().  Release with unlock_requestor().
 */
#define lock_requestor(channel, flags)					\
do {									\
	struct vmbus_requestor *rqstor = &(channel)->requestor;		\
									\
	spin_lock_irqsave(&rqstor->req_lock, flags);			\
} while (0)
1055
1056 static __always_inline void unlock_requestor(struct vmbus_channel *channel,
1057 unsigned long flags)
1058 {
1059 struct vmbus_requestor *rqstor = &channel->requestor;
1060
1061 spin_unlock_irqrestore(&rqstor->req_lock, flags);
1062 }
1063
1064 u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
1065 u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1066 u64 rqst_addr);
1067 u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1068 u64 rqst_addr);
1069 u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
1070
1071 static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
1072 {
1073 return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
1074 }
1075
1076 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
1077 {
1078 return is_hvsock_offer(&c->offermsg);
1079 }
1080
1081 static inline bool is_sub_channel(const struct vmbus_channel *c)
1082 {
1083 return c->offermsg.offer.sub_channel_index != 0;
1084 }
1085
/* Select how incoming ring interrupts are dispatched for this channel. */
static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

/* Attach driver-private state to the channel. */
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

/* Retrieve the driver-private state set by set_per_channel_state(). */
static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
1101
/*
 * Set the outbound ring's pending_send_sz: ask to be signaled once at
 * least 'size' bytes are free again; zero cancels the request.  Also
 * maintains the out_full_* statistics under the outbound ring lock.
 */
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		/* Count only the first write to hit a full ring. */
		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
1122
1123 void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
1124
1125 int vmbus_request_offers(void);
1126
1127
1128
1129
1130
1131 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1132 void (*sc_cr_cb)(struct vmbus_channel *new_sc));
1133
1134 void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1135 void (*chn_rescind_cb)(struct vmbus_channel *));
1136
1137
/* The format must be the same as struct vmdata_gpa_direct. */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct. */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct. */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;
1172
1173 int vmbus_alloc_ring(struct vmbus_channel *channel,
1174 u32 send_size, u32 recv_size);
1175 void vmbus_free_ring(struct vmbus_channel *channel);
1176
1177 int vmbus_connect_ring(struct vmbus_channel *channel,
1178 void (*onchannel_callback)(void *context),
1179 void *context);
1180 int vmbus_disconnect_ring(struct vmbus_channel *channel);
1181
1182 extern int vmbus_open(struct vmbus_channel *channel,
1183 u32 send_ringbuffersize,
1184 u32 recv_ringbuffersize,
1185 void *userdata,
1186 u32 userdatalen,
1187 void (*onchannel_callback)(void *context),
1188 void *context);
1189
1190 extern void vmbus_close(struct vmbus_channel *channel);
1191
1192 extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
1193 void *buffer,
1194 u32 bufferLen,
1195 u64 requestid,
1196 u64 *trans_id,
1197 enum vmbus_packet_type type,
1198 u32 flags);
1199 extern int vmbus_sendpacket(struct vmbus_channel *channel,
1200 void *buffer,
1201 u32 bufferLen,
1202 u64 requestid,
1203 enum vmbus_packet_type type,
1204 u32 flags);
1205
1206 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1207 struct hv_page_buffer pagebuffers[],
1208 u32 pagecount,
1209 void *buffer,
1210 u32 bufferlen,
1211 u64 requestid);
1212
1213 extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
1214 struct vmbus_packet_mpb_array *mpb,
1215 u32 desc_size,
1216 void *buffer,
1217 u32 bufferlen,
1218 u64 requestid);
1219
1220 extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
1221 void *kbuffer,
1222 u32 size,
1223 struct vmbus_gpadl *gpadl);
1224
1225 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
1226 struct vmbus_gpadl *gpadl);
1227
1228 void vmbus_reset_channel_cb(struct vmbus_channel *channel);
1229
1230 extern int vmbus_recvpacket(struct vmbus_channel *channel,
1231 void *buffer,
1232 u32 bufferlen,
1233 u32 *buffer_actual_len,
1234 u64 *requestid);
1235
1236 extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1237 void *buffer,
1238 u32 bufferlen,
1239 u32 *buffer_actual_len,
1240 u64 *requestid);
1241
1242
1243 extern void vmbus_ontimer(unsigned long data);
1244
1245
/* Base driver object registered by every VMBus device driver. */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer (one carrying the
	 * VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER channel flag) cannot be
	 * matched by dev_type or id_table, since every hv_sock connection
	 * produces a new offer with the same type/instance GUIDs.  The
	 * driver handling hvsock offers sets this flag instead.
	 */
	bool hvsock;

	/* The device type supported by this driver. */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUIDs added at runtime */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};
1283
1284
/* Base device object: one per channel offered by the host. */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	/*
	 * Driver name to force a match.  Do not set directly; presumably
	 * managed by the driver core override mechanism -- confirm.
	 */
	const char *driver_override;

	struct vmbus_channel *channel;
	struct kset *channels_kset;
	struct device_dma_parameters dma_parms;
	u64 dma_mask;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};
1310
1311
/* Cast a generic struct device back to its containing hv_device. */
static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

/* Cast a generic device_driver back to its containing hv_driver. */
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

/* Attach driver-private data to the hv_device's embedded struct device. */
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

/* Retrieve the driver-private data set by hv_set_drvdata(). */
static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}
1331
/* Snapshot of a ring buffer's state, filled by hv_ringbuffer_get_debuginfo(). */
struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};
1339
1340
1341 int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
1342 struct hv_ring_buffer_debug_info *debug_info);
1343
1344
1345 #define vmbus_driver_register(driver) \
1346 __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1347 int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1348 struct module *owner,
1349 const char *mod_name);
1350 void vmbus_driver_unregister(struct hv_driver *hv_driver);
1351
1352 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1353
1354 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1355 resource_size_t min, resource_size_t max,
1356 resource_size_t size, resource_size_t align,
1357 bool fb_overlap_ok);
1358 void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368 #define HV_NIC_GUID \
1369 .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1370 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1371
1372
1373
1374
1375
1376 #define HV_IDE_GUID \
1377 .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1378 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1379
1380
1381
1382
1383
1384 #define HV_SCSI_GUID \
1385 .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1386 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1387
1388
1389
1390
1391
1392 #define HV_SHUTDOWN_GUID \
1393 .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1394 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1395
1396
1397
1398
1399
1400 #define HV_TS_GUID \
1401 .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1402 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1403
1404
1405
1406
1407
1408 #define HV_HEART_BEAT_GUID \
1409 .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1410 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1411
1412
1413
1414
1415
1416 #define HV_KVP_GUID \
1417 .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1418 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1419
1420
1421
1422
1423
1424 #define HV_DM_GUID \
1425 .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1426 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1427
1428
1429
1430
1431
1432 #define HV_MOUSE_GUID \
1433 .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1434 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1435
1436
1437
1438
1439
1440 #define HV_KBD_GUID \
1441 .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1442 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1443
1444
1445
1446
1447 #define HV_VSS_GUID \
1448 .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1449 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1450
1451
1452
1453
1454 #define HV_SYNTHVID_GUID \
1455 .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1456 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1457
1458
1459
1460
1461
1462 #define HV_SYNTHFC_GUID \
1463 .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1464 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1465
1466
1467
1468
1469
1470
1471 #define HV_FCOPY_GUID \
1472 .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1473 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1474
1475
1476
1477
1478
1479 #define HV_ND_GUID \
1480 .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1481 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1482
1483
1484
1485
1486
1487
1488 #define HV_PCIE_GUID \
1489 .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1490 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503 #define HV_AVMA1_GUID \
1504 .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
1505 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
1506
1507 #define HV_AVMA2_GUID \
1508 .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
1509 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
1510
1511 #define HV_RDV_GUID \
1512 .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
1513 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1514
1515 #define HV_IMC_GUID \
1516 .guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
1517 0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
1518
1519
1520
1521
1522
1523 #define ICMSGTYPE_NEGOTIATE 0
1524 #define ICMSGTYPE_HEARTBEAT 1
1525 #define ICMSGTYPE_KVPEXCHANGE 2
1526 #define ICMSGTYPE_SHUTDOWN 3
1527 #define ICMSGTYPE_TIMESYNC 4
1528 #define ICMSGTYPE_VSS 5
1529 #define ICMSGTYPE_FCOPY 7
1530
1531 #define ICMSGHDRFLAG_TRANSACTION 1
1532 #define ICMSGHDRFLAG_REQUEST 2
1533 #define ICMSGHDRFLAG_RESPONSE 4
1534
1535
1536
1537
1538
1539
1540
1541
/*
 * A Hyper-V integration ("util") service instance: heartbeat, KVP,
 * shutdown, timesync, VSS, fcopy (see the ICMSGTYPE_* defines above).
 */
struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};

/* Header prepended to every integration-service message. */
struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

/* Major/minor version pair used in IC version negotiation. */
struct ic_version {
	u16 major;
	u16 minor;
} __packed;

/* Common header for integration-service (ICMSGTYPE_*) messages. */
struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;			/* ICMSGHDRFLAG_* */
	u8 reserved[2];
} __packed;
1572
1573 #define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
1574 #define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
1575 #define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
1576 (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
1577 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
1578
/* Version-negotiation payload: supported framework/message versions. */
struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[]; /* any size array */
} __packed;

/* Payload of an ICMSGTYPE_SHUTDOWN request. */
struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8 display_message[2048];
} __packed;

/* Payload of an ICMSGTYPE_HEARTBEAT message. */
struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;
1597
1598
1599 #define ICTIMESYNCFLAG_PROBE 0
1600 #define ICTIMESYNCFLAG_SYNC 1
1601 #define ICTIMESYNCFLAG_SAMPLE 2
1602
1603 #ifdef __x86_64__
1604 #define WLTIMEDELTA 116444736000000000L
1605 #else
1606 #define WLTIMEDELTA 116444736000000000LL
1607 #endif
1608
1609 struct ictimesync_data {
1610 u64 parenttime;
1611 u64 childtime;
1612 u64 roundtriptime;
1613 u8 flags;
1614 } __packed;
1615
/* Time sample paired with the VM's reference-time counter reading. */
struct ictimesync_ref_data {
	u64 parenttime;		/* host time */
	u64 vmreferencetime;	/* guest reference counter value */
	u8 flags;		/* ICTIMESYNCFLAG_* */
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;
1624
/*
 * Associates a service GUID with the callback that handles its
 * messages on the given channel.
 */
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;		/* descriptive string for logging */
	guid_t data;		/* service GUID */
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};
1632
/* A single DMA mapping: bus address plus mapped length. */
struct hv_dma_range {
	dma_addr_t dma;
	u32 mapping_size;
};
1637
/* Largest encodable service version used during negotiation */
#define MAX_SRV_VER 0x7ffffff
/*
 * Build the response to a version-negotiation request in @buf; the
 * agreed framework/service versions are reported through
 * @nego_fw_version / @nego_srv_version.
 */
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);

/* VMBus protocol version negotiated with the host */
extern __u32 vmbus_proto_version;

/* NOTE(review): "servie" spelling matches the out-of-line definition */
int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id);
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
1657
1658
1659 static inline void *
1660 hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
1661 {
1662 return ring_info->ring_buffer->buffer;
1663 }
1664
1665
1666
1667
/*
 * Mask ring buffer interrupts before draining the ring in a polling
 * loop; pair with hv_end_read().
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}
1675
1676
1677
1678
/*
 * Re-enable ring buffer interrupts after draining and return the
 * number of bytes still pending (non-zero means the caller raced
 * with new data and must keep processing).
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
1694
1695
1696
1697
1698
1699
1700 static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1701 {
1702 return (void *)((unsigned long)desc + (desc->offset8 << 3));
1703 }
1704
1705
1706 static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1707 {
1708 return (desc->len8 << 3) - (desc->offset8 << 3);
1709 }
1710
1711
1712 static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
1713 {
1714 return desc->len8 << 3;
1715 }
1716
/* Begin iterating over the channel's inbound ring buffer. */
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

/* Advance past @pkt; callers normally use hv_pkt_iter_next() below. */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

/* Finish iteration and update the ring's read state. */
void hv_pkt_iter_close(struct vmbus_channel *channel);
1725
1726 static inline struct vmpacket_descriptor *
1727 hv_pkt_iter_next(struct vmbus_channel *channel,
1728 const struct vmpacket_descriptor *pkt)
1729 {
1730 struct vmpacket_descriptor *nxt;
1731
1732 nxt = __hv_pkt_iter_next(channel, pkt);
1733 if (!nxt)
1734 hv_pkt_iter_close(channel);
1735
1736 return nxt;
1737 }
1738
/*
 * Iterate over every packet currently available in @channel's inbound
 * ring; hv_pkt_iter_next() ends the iteration when the ring is drained.
 */
#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
1742
1743
1744
1745
1746
1747
1748
1749
/* Maximum size in bytes of one PCI config block transfer */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

/* Read config block @block_id of @dev into @buf. */
int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
/* Write @len bytes from @buf to config block @block_id of @dev. */
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
/* Register a callback invoked when config blocks are invalidated. */
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));
1759
/*
 * Backend operations implementing the hyperv_*_cfg_blk() /
 * hyperv_reg_block_invalidate() entry points declared above.
 */
struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;
1771
1772 static inline unsigned long virt_to_hvpfn(void *addr)
1773 {
1774 phys_addr_t paddr;
1775
1776 if (is_vmalloc_addr(addr))
1777 paddr = page_to_phys(vmalloc_to_page(addr)) +
1778 offset_in_page(addr);
1779 else
1780 paddr = __pa(addr);
1781
1782 return paddr >> HV_HYP_PAGE_SHIFT;
1783 }
1784
/* Number of Hyper-V pages (HV_HYP_PAGE_SIZE) per guest PAGE_SIZE page */
#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
/* Byte offset of @ptr within its Hyper-V page */
#define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
/* Round a byte count/address up or down to a Hyper-V page frame number */
#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT)
/* First Hyper-V PFN covered by the given struct page */
#define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
1790
1791 #endif