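/*
 * Private definitions for the Hyper-V VMBus core driver: SynIC and
 * hypercall message structures, the monitored-notification facility,
 * ring-buffer primitives, and the VMBus connection/channel management
 * interfaces.
 */
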
#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

#include "hv_trace.h"

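/*
 * Timeout (in seconds) for util services such as KVP and fcopy to
 * respond to a host request.
 */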
#define HV_UTIL_TIMEOUT 30

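/*
 * Timeout (in seconds) for the guest/host version negotiation
 * (handshake) of the util services.
 */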
#define HV_UTIL_NEGO_TIMEOUT 55

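/* Definitions for the monitored notification facility */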
union hv_monitor_trigger_group {
	u64 as_uint64;
	struct {
		u32 pending;
		u32 armed;
	};
};

struct hv_monitor_parameter {
	union hv_connection_id connectionid;
	u16 flagnumber;
	u16 rsvdz;
};

union hv_monitor_trigger_state {
	u32 asu32;

	struct {
		u32 group_enable:4;
		u32 rsvdz:28;
	};
};

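/*
 * Layout of the monitor page shared with the host: a trigger state word,
 * four trigger groups, and per-flag check times, latencies and connection
 * parameters (4 groups x 32 flags per group). All "rsvdz" fields are
 * reserved and must be zero.
 */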
struct hv_monitor_page {
	union hv_monitor_trigger_state trigger_state;
	u32 rsvdz1;

	union hv_monitor_trigger_group trigger_group[4];
	u64 rsvdz2[3];

	s32 next_checktime[4][32];

	u16 latency[4][32];
	u64 rsvdz3[32];

	struct hv_monitor_parameter parameter[4][32];

	u8 rsvdz4[1984];
};

#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)

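/* Definition of the hv_post_message hypercall input structure */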
struct hv_input_post_message {
	union hv_connection_id connectionid;
	u32 reserved;
	u32 message_type;
	u32 payload_size;
	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};

enum {
	VMBUS_MESSAGE_CONNECTION_ID = 1,
	VMBUS_MESSAGE_CONNECTION_ID_4 = 4,
	VMBUS_MESSAGE_PORT_ID = 1,
	VMBUS_EVENT_CONNECTION_ID = 2,
	VMBUS_EVENT_PORT_ID = 2,
	VMBUS_MONITOR_CONNECTION_ID = 3,
	VMBUS_MONITOR_PORT_ID = 3,
	VMBUS_MESSAGE_SINT = 2,
};

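/*
 * Per-cpu state for channel handling
 */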
struct hv_per_cpu_context {
	void *synic_message_page;
	void *synic_event_page;

	/*
	 * Pre-allocated page used as the hypercall input page for
	 * HVCALL_POST_MESSAGE when the regular per-cpu hypercall input
	 * page cannot be used (e.g. in Confidential VMs, where the input
	 * must live in a page shared with the hypervisor).
	 */
	void *post_msg_page;

	/*
	 * Channel interrupts can be taken on any CPU, so incoming VMBus
	 * messages are handled by a per-cpu tasklet.
	 */
	struct tasklet_struct msg_dpc;
};

struct hv_context {
	/*
	 * Identifies this guest to the hypervisor; only running on top of
	 * Hyper-V is supported, so this holds the Hyper-V guest ID.
	 */
	u64 guestid;

	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by NUMA node ID.
	 */
	struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

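/* Low-level Hyper-V interface: SynIC setup and hypervisor messaging */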
extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
			   enum hv_message_type message_type,
			   void *payload, size_t payload_size);

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);

extern void hv_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);

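/* Ring-buffer interface */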
void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 pagecnt, u32 max_pkt_size);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid, u64 *trans_id);

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw);

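/*
 * The maximum number of channels is limited by the size of the
 * per-connection interrupt page (HV_HYP_PAGE_SIZE): half of the page is
 * used for send-side interrupt bits and the other half for the receive
 * side, with one bit per channel.
 */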
#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)

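/* This value must be a multiple of 32 */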
#define MAX_NUM_CHANNELS_SUPPORTED 256

#define MAX_CHANNEL_RELIDS \
	max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)

enum vmbus_connect_state {
	DISCONNECTED,
	CONNECTING,
	CONNECTED,
	DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT

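/*
 * The CPU that Hyper-V will interrupt for VMBUS messages, such as
 * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
 */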
#define VMBUS_CONNECT_CPU 0

struct vmbus_connection {
	u32 msg_conn_id;

	atomic_t offer_in_progress;

	enum vmbus_connect_state conn_state;

	atomic_t next_gpadl_handle;

	struct completion unload_event;

	/*
	 * Represents channel interrupts. Each bit position represents a
	 * channel. When a channel sends an interrupt via VMBus, it finds
	 * its bit in the send interrupt page, sets it and asks the
	 * hypervisor to generate a port event. The other end receives the
	 * port event and parses the recv interrupt page to see which bit
	 * is set.
	 */
	void *int_page;
	void *send_int_page;
	void *recv_int_page;

	/*
	 * Two monitor pages: the first is for parent->child (host->guest)
	 * notifications, the second for child->parent (guest->host)
	 * notifications.
	 */
	struct hv_monitor_page *monitor_pages[2];
	void *monitor_pages_original[2];
	phys_addr_t monitor_pages_pa[2];
	struct list_head chn_msg_list;
	spinlock_t channelmsg_lock;

	/* List of channels */
	struct list_head chn_list;
	struct mutex channel_mutex;

	/* Array of channels, indexed by channel relid */
	struct vmbus_channel **channels;

	/*
	 * An offer message is handled first on work_queue, and then is
	 * further handled on handle_primary_chan_wq or handle_sub_chan_wq.
	 */
	struct workqueue_struct *work_queue;
	struct workqueue_struct *handle_primary_chan_wq;
	struct workqueue_struct *handle_sub_chan_wq;
	struct workqueue_struct *rescind_work_queue;

	/* When set, any incoming channel offer messages are ignored */
	bool ignore_any_offer_msg;

	/*
	 * The number of sub-channels and hv_sock channels that should be
	 * cleaned up on suspend: sub-channels will be re-created on
	 * resume, and hv_sock channels should not survive suspend.
	 */
	atomic_t nr_chan_close_on_suspend;
	/*
	 * vmbus_bus_suspend() waits until nr_chan_close_on_suspend drops
	 * to zero.
	 */
	struct completion ready_for_suspend_event;

	/*
	 * The number of primary channels that should be "fixed up" on
	 * resume: these channels are re-offered on resume, and some fields
	 * of the channel offers (i.e. child_relid and connection_id) can
	 * change, so the old offer messages must be fixed up before the
	 * resume callbacks of the VSC drivers start to further touch the
	 * channels.
	 */
	atomic_t nr_chan_fixup_on_resume;
	/*
	 * vmbus_bus_resume() waits until nr_chan_fixup_on_resume drops
	 * to zero.
	 */
	struct completion ready_for_resume_event;
};

struct vmbus_msginfo {
	/* Bookkeeping */
	struct list_head msglist_entry;

	/* The message itself */
	unsigned char msg[];
};

extern struct vmbus_connection vmbus_connection;

int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);

static inline void vmbus_send_interrupt(u32 relid)
{
	sync_set_bit(relid, vmbus_connection.send_int_page);
}

enum vmbus_message_handler_type {
	/* The related handler can sleep. */
	VMHT_BLOCKING = 0,

	/* The related handler must NOT sleep. */
	VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	enum vmbus_message_handler_type handler_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
	u32 min_payload_len;
};

extern const struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT];

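/* General vmbus interface */
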
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
			   struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

void vmbus_channel_map_relid(struct vmbus_channel *channel);
void vmbus_channel_unmap_relid(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

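/* Connection interface */
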
int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);

int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
int hv_fcopy_pre_suspend(void);
int hv_fcopy_pre_resume(void);
void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

static inline void hv_poll_channel(struct vmbus_channel *channel,
				   void (*cb)(void *))
{
	if (!channel)
		return;
	cb(channel);
}

enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,		/* driver is loaded */
	HVUTIL_READY,			/* userspace is registered */
	HVUTIL_HOSTMSG_RECEIVED,	/* message from host was received */
	HVUTIL_USERSPACE_REQ,		/* request to userspace was sent */
	HVUTIL_USERSPACE_RECV,		/* reply from userspace was received */
	HVUTIL_DEVICE_DYING,		/* driver unload is in progress */
};

enum delay {
	INTERRUPT_DELAY = 0,
	MESSAGE_DELAY = 1,
};

extern const struct vmbus_device vmbus_devs[];

static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
{
	return vmbus_devs[channel->device_id].perf_device;
}

static inline bool hv_is_allocated_cpu(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;

	lockdep_assert_held(&vmbus_connection.channel_mutex);
	/*
	 * List additions/deletions as well as updates of the target CPUs
	 * are protected by channel_mutex.
	 */
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!hv_is_perf_channel(channel))
			continue;
		if (channel->target_cpu == cpu)
			return true;
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu)
				return true;
		}
	}
	return false;
}

static inline void hv_set_allocated_cpu(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_clear_allocated_cpu(unsigned int cpu)
{
	/*
	 * Only clear the CPU from the NUMA map if no perf channel (or
	 * sub-channel) still targets it.
	 */
	if (hv_is_allocated_cpu(cpu))
		return;
	cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_update_allocated_cpus(unsigned int old_cpu,
					    unsigned int new_cpu)
{
	hv_set_allocated_cpu(new_cpu);
	hv_clear_allocated_cpu(old_cpu);
}

#ifdef CONFIG_HYPERV_TESTING

int hv_debug_add_dev_dir(struct hv_device *dev);
void hv_debug_rm_dev_dir(struct hv_device *dev);
void hv_debug_rm_all_dir(void);
int hv_debug_init(void);
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);

#else /* CONFIG_HYPERV_TESTING */

static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
static inline void hv_debug_rm_all_dir(void) {};
static inline void hv_debug_delay_test(struct vmbus_channel *channel,
				       enum delay delay_type) {};
static inline int hv_debug_init(void)
{
	return -1;
}

static inline int hv_debug_add_dev_dir(struct hv_device *dev)
{
	return -1;
}

#endif /* CONFIG_HYPERV_TESTING */

#endif /* _HYPERV_VMBUS_H */