// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>
#include <linux/sched/isolation.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel);

const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE,
	  HV_IDE_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = false,
	},

	/* SCSI */
	{ .dev_type = HV_SCSI,
	  HV_SCSI_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = true,
	},

	/* Fibre Channel */
	{ .dev_type = HV_FC,
	  HV_SYNTHFC_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = false,
	},

	/* Synthetic NIC */
	{ .dev_type = HV_NIC,
	  HV_NIC_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = true,
	},

	/* Network Direct */
	{ .dev_type = HV_ND,
	  HV_ND_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = false,
	},

	/* PCIE */
	{ .dev_type = HV_PCIE,
	  HV_PCIE_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,
	  HV_SYNTHVID_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,
	  HV_KBD_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Synthetic Mouse */
	{ .dev_type = HV_MOUSE,
	  HV_MOUSE_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* KVP */
	{ .dev_type = HV_KVP,
	  HV_KVP_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Time Synch */
	{ .dev_type = HV_TS,
	  HV_TS_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = true,
	},

	/* Heartbeat */
	{ .dev_type = HV_HB,
	  HV_HEART_BEAT_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = true,
	},

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN,
	  HV_SHUTDOWN_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = true,
	},

	/* File copy */
	{ .dev_type = HV_FCOPY,
	  HV_FCOPY_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Backup */
	{ .dev_type = HV_BACKUP,
	  HV_VSS_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Dynamic Memory */
	{ .dev_type = HV_DM,
	  HV_DM_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},
};
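
/*
 * Note: vmbus_devs[] is indexed by the HV_* device-type values and must stay
 * in that order, with HV_UNKNOWN as the terminating entry, because
 * hv_get_dev_type() below walks it from HV_IDE up to HV_UNKNOWN and returns
 * the index as the device type.
 */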

static const struct {
	guid_t guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID },
	{ HV_IMC_GUID },
};

/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	channel->rescind = true;
	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {

		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

static bool is_unsupported_vmbus_devs(const guid_t *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}

static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const guid_t *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (guid_equal(guid, &vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}

/**
 * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @buflen: Length of the raw buffer channel data.
 * @fw_version: The framework versions we can support.
 * @fw_vercnt: The size of @fw_version.
 * @srv_version: The service versions we can support.
 * @srv_vercnt: The size of @srv_version.
 * @nego_fw_version: The selected framework version.
 * @nego_srv_version: The selected service version.
 *
 * Note: Versions are given in decreasing order.
 *
 * Set up and fill in default negotiate response message.
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
				u32 buflen, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	/* Check that there's enough space for icframe_vercnt, icmsg_vercnt */
	if (buflen < ICMSG_HDR + offsetof(struct icmsg_negotiate, reserved)) {
		pr_err_ratelimited("Invalid icmsg negotiate\n");
		return false;
	}

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[ICMSG_HDR];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/* Validate negop packet */
	if (icframe_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
	    icmsg_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
	    ICMSG_NEGOTIATE_PKT_SIZE(icframe_major, icmsg_major) > buflen) {
		pr_err_ratelimited("Invalid icmsg negotiate - icframe_major: %u, icmsg_major: %u\n",
				   icframe_major, icmsg_major);
		goto fw_error;
	}

	/*
	 * Select the framework version number we will
	 * support.
	 */
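	/*
	 * Each fw_version[] / srv_version[] entry packs the major version in
	 * the high 16 bits and the minor version in the low 16 bits, which is
	 * why the loops below split the values with ">> 16" and "& 0xFFFF".
	 */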
	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
			(j < negop->icframe_vercnt + negop->icmsg_vercnt);
			j++) {

			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {

				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
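
/*
 * Usage sketch (illustrative, not part of this file): an IC utility
 * driver's channel callback typically handles ICMSGTYPE_NEGOTIATE with
 * something like the following, where fw_versions[]/srv_versions[] and
 * their counts are the driver's own version tables:
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
 *		if (vmbus_prep_negotiate_resp(icmsghdrp, buf, buflen,
 *					      fw_versions, FW_VER_COUNT,
 *					      srv_versions, SRV_VER_COUNT,
 *					      NULL, &srv_version))
 *			pr_info("negotiated version %d.%d\n",
 *				srv_version >> 16, srv_version & 0xFFFF);
 *	}
 */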

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	spin_lock_init(&channel->sched_lock);
	init_completion(&channel->rescind_event);

	INIT_LIST_HEAD(&channel->sc_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	hv_ringbuffer_pre_init(channel);

	return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);
	vmbus_remove_channel_attr_group(channel);

	kobject_put(&channel->kobj);
}

void vmbus_channel_map_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	/*
	 * The mapping of the channel's relid is visible from the CPUs that
	 * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
	 * execute:
	 *
	 *  (a) In the "normal (i.e., not resuming from hibernation)" path,
	 *      the full barrier in virt_store_mb() guarantees that the store
	 *      is propagated to all CPUs before the add_channel_work work
	 *      is queued.  In turn, add_channel_work is queued before the
	 *      channel's ring buffer is allocated/initialized and the
	 *      OPENCHANNEL message for the channel is sent in vmbus_open().
	 *      Hyper-V won't start sending the interrupts for the channel
	 *      before the OPENCHANNEL message is acked.  The memory barrier
	 *      in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
	 *      that vmbus_chan_sched() must find the channel's relid in
	 *      recv_int_page before retrieving the channel pointer from the
	 *      array of channels.
	 *
	 *  (b) In the "resuming from hibernation" path, the virt_store_mb()
	 *      guarantees that the store is visible, and the barrier in
	 *      sync_test_and_clear_bit() ensures that the channel is valid,
	 *      when vmbus_chan_sched() finds the channel's relid in
	 *      recv_int_page.
	 */
	virt_store_mb(
		vmbus_connection.channels[channel->offermsg.child_relid],
		channel);
}

void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	WRITE_ONCE(
		vmbus_connection.channels[channel->offermsg.child_relid],
		NULL);
}
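
/*
 * Notify the host that the guest has released the given relid, so that the
 * host is free to reuse it in a future channel offer.
 */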
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;
	int ret;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
			     true);

	trace_vmbus_release_relid(&msg, ret);
}

void hv_process_channel_removal(struct vmbus_channel *channel)
{
	lockdep_assert_held(&vmbus_connection.channel_mutex);
	BUG_ON(!channel->rescind);

	/*
	 * hv_process_channel_removal() could find INVALID_RELID only for
	 * hv_sock channels.  See the inline comments in vmbus_onoffer().
	 */
	WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
		!is_hvsock_channel(channel));

	/*
	 * Upon suspend, an in-use hv_sock channel is removed from the array of
	 * channels and the relid is invalidated.  After hibernation, when the
	 * user-space application destroys the channel, it's unnecessary and
	 * unsafe to remove the channel from the array of channels.  See also
	 * the inline comments before the call of vmbus_release_relid() below.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_channel_unmap_relid(channel);

	if (channel->primary_channel == NULL)
		list_del(&channel->listentry);
	else
		list_del(&channel->sc_list);

	/*
	 * If this is a "perf" channel, updates the hv_numa_map[] masks so that
	 * init_vp_index() can (re-)use the CPU.
	 */
	if (hv_is_perf_channel(channel))
		hv_clear_allocated_cpu(channel->target_cpu);

	/*
	 * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
	 * the relid is invalidated; after hibernation, when the user-space app
	 * destroys the channel, the relid is INVALID_RELID, and in this case
	 * it's unnecessary and unsafe to release the old relid, since the same
	 * relid can refer to a completely different channel now.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_release_relid(channel->offermsg.child_relid);

	free_channel(channel);
}

void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
		listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}

/* Note: the function can run concurrently for primary/sub channels. */
static void vmbus_add_channel_work(struct work_struct *work)
{
	struct vmbus_channel *newchannel =
		container_of(work, struct vmbus_channel, add_channel_work);
	struct vmbus_channel *primary_channel = newchannel->primary_channel;
	int ret;

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (primary_channel != NULL) {
		/* newchannel is a sub-channel. */
		struct hv_device *dev = primary_channel->device_obj;

		if (vmbus_add_channel_kobj(dev, newchannel))
			goto err_deq_chan;

		if (primary_channel->sc_creation_callback != NULL)
			primary_channel->sc_creation_callback(newchannel);

		newchannel->probe_done = true;
		return;
	}

	/*
	 * Start the process of binding the primary channel to the driver
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = newchannel->device_id;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	ret = vmbus_device_register(newchannel->device_obj);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
			newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	newchannel->probe_done = true;
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * We need to set the flag, otherwise
	 * vmbus_onoffer_rescind() can be blocked.
	 */
	newchannel->probe_done = true;

	if (primary_channel == NULL)
		list_del(&newchannel->listentry);
	else
		list_del(&newchannel->sc_list);

	/* vmbus_process_offer() has mapped the channel. */
	vmbus_channel_unmap_relid(newchannel);

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_release_relid(newchannel->offermsg.child_relid);

	free_channel(newchannel);
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	struct workqueue_struct *wq;
	bool fnew = true;

	/*
	 * Synchronize vmbus_process_offer() and CPU hotplugging:
	 *
	 * CPU1				CPU2
	 *
	 * [vmbus_process_offer()]	[Hot removal of the CPU]
	 *
	 * CPU_READ_LOCK		CPUS_WRITE_LOCK
	 * LOAD cpus_online		LOAD with_intc
	 * STORE target_cpu		LOAD cpus_online
	 * ADD chn_list			STORE with_intc
	 * RELEASE cpu_lock		SEARCH chn_list
	 *
	 * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
	 *		CPU2's SEARCH from *not* seeing CPU1's ADD
	 */
	cpus_read_lock();

	/*
	 * Serializes the modifications of the chn_list list as well as
	 * the accesses to next_numa_node_id in init_vp_index().
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (guid_equal(&channel->offermsg.offer.if_type,
			       &newchannel->offermsg.offer.if_type) &&
		    guid_equal(&channel->offermsg.offer.if_instance,
			       &newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			newchannel->primary_channel = channel;
			break;
		}
	}

	init_vp_index(newchannel);

	/* Remember the channels that should be cleaned up upon suspend. */
	if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
		atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);

	/*
	 * Now that we have acquired the channel_mutex,
	 * we can release the potentially racing rescind thread.
	 */
	atomic_dec(&vmbus_connection.offer_in_progress);

	if (fnew) {
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);
	} else {
		/*
		 * Check to see if this is a valid sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index == 0) {
			mutex_unlock(&vmbus_connection.channel_mutex);
			cpus_read_unlock();
			/*
			 * Don't call free_channel(), because newchannel->kobj
			 * is not initialized yet.
			 */
			kfree(newchannel);
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Process the sub-channel.
		 */
		list_add_tail(&newchannel->sc_list, &channel->sc_list);
	}

	vmbus_channel_map_relid(newchannel);

	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();

	/*
	 * vmbus_process_offer() mustn't call channel->sc_creation_callback()
	 * directly for sub-channels, because sc_creation_callback() ->
	 * vmbus_open() may never get the host's response to the
	 * OPEN_CHANNEL message (the host may rescind a channel at any time,
	 * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
	 * may not wake up the vmbus_open() as it's blocked due to a non-zero
	 * vmbus_connection.offer_in_progress, and finally we have a deadlock.
	 *
	 * The above is also true for primary channels, if the related device
	 * drivers use sync probing mode by default.
	 *
	 * And, usually the handling of primary channels and sub-channels can
	 * depend on each other, so we should offload them to different
	 * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
	 * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
	 * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
	 * and waits for all the sub-channels to appear, but the latter
	 * can't get the rtnl_lock and this blocks the handling of
	 * sub-channels.
	 */
	INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
	wq = fnew ? vmbus_connection.handle_primary_chan_wq :
		    vmbus_connection.handle_sub_chan_wq;
	queue_work(wq, &newchannel->add_channel_work);
}

/*
 * Check if CPUs used by other channels of the same device.
 * It should only be called by init_vp_index().
 */
static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
{
	struct vmbus_channel *primary = chn->primary_channel;
	struct vmbus_channel *sc;

	lockdep_assert_held(&vmbus_connection.channel_mutex);

	if (!primary)
		return false;

	if (primary->target_cpu == cpu)
		return true;

	list_for_each_entry(sc, &primary->sc_list, sc_list)
		if (sc != chn && sc->target_cpu == cpu)
			return true;

	return false;
}

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 *
 * For performance critical channels, distribute the load across the online
 * CPUs: walk the NUMA nodes round-robin (next_numa_node_id) and, within the
 * chosen node, pick the first housekeeping CPU that has not been allocated
 * yet (tracked in hv_context.hv_numa_map[]), preferring a CPU not already
 * used by another channel of the same device.  Non-performance critical
 * channels are all bound to VMBUS_CONNECT_CPU.
 */
static void init_vp_index(struct vmbus_channel *channel)
{
	bool perf_chn = hv_is_perf_channel(channel);
	u32 i, ncpu = num_online_cpus();
	cpumask_var_t available_mask;
	struct cpumask *allocated_mask;
	const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
	u32 target_cpu;
	int numa_node;

	if (!perf_chn ||
	    !alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
	    cpumask_empty(hk_mask)) {
		/*
		 * If the channel is not a performance critical
		 * channel, bind it to VMBUS_CONNECT_CPU.
		 * In case alloc_cpumask_var() fails, bind it to
		 * VMBUS_CONNECT_CPU.
		 * If all the cpus are isolated, bind it to
		 * VMBUS_CONNECT_CPU.
		 */
		channel->target_cpu = VMBUS_CONNECT_CPU;
		if (perf_chn)
			hv_set_allocated_cpu(VMBUS_CONNECT_CPU);
		return;
	}

	for (i = 1; i <= ncpu + 1; i++) {
		while (true) {
			numa_node = next_numa_node_id++;
			if (numa_node == nr_node_ids) {
				next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(numa_node)))
				continue;
			break;
		}
		allocated_mask = &hv_context.hv_numa_map[numa_node];

retry:
		cpumask_xor(available_mask, allocated_mask,
			    cpumask_of_node(numa_node));
		cpumask_and(available_mask, available_mask, hk_mask);

		if (cpumask_empty(available_mask)) {
			/*
			 * We have cycled through all the CPUs in the node;
			 * reset the allocated map.
			 */
			cpumask_clear(allocated_mask);
			goto retry;
		}

		target_cpu = cpumask_first(available_mask);
		cpumask_set_cpu(target_cpu, allocated_mask);

		if (channel->offermsg.offer.sub_channel_index >= ncpu ||
		    i > ncpu || !hv_cpuself_used(target_cpu, channel))
			break;
	}

	channel->target_cpu = target_cpu;

	free_cpumask_var(available_mask);
}

#define UNLOAD_DELAY_UNIT_MS	10		/* 10 milliseconds */
#define UNLOAD_WAIT_MS		(100*1000)	/* 100 seconds */
#define UNLOAD_WAIT_LOOPS	(UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
#define UNLOAD_MSG_MS		(5*1000)	/* Every 5 seconds */
#define UNLOAD_MSG_LOOPS	(UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
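
/*
 * With the values above, vmbus_wait_for_unload() polls in 10 ms steps:
 * UNLOAD_WAIT_LOOPS = 100000 / 10 = 10000 iterations (~100 seconds total),
 * printing a progress notice every UNLOAD_MSG_LOOPS = 5000 / 10 = 500
 * iterations (~5 seconds).
 */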

static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type, i;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to a CPU which is performing unload.
	 * When we're crashing on a different CPU let's hope that IRQ handler
	 * on the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 *
	 * Wait up to 100 seconds since an Azure host must writeback any dirty
	 * data in its disk cache before the VMbus UNLOAD request will
	 * complete. This flushing has been empirically observed to take up
	 * to 50 seconds in cases with a lot of dirty data, so allow additional
	 * leeway and for inaccuracies in mdelay(). But eventually time out so
	 * that the panic path can't get hung forever in case the response
	 * message isn't seen.
	 */
	for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
		if (completion_done(&vmbus_connection.unload_event))
			goto completed;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		/*
		 * Give a notice periodically so someone watching the
		 * serial output won't think it is completely hung.
		 */
		if (!(i % UNLOAD_MSG_LOOPS))
			pr_notice("Waiting for VMBus UNLOAD to complete\n");

		mdelay(UNLOAD_DELAY_UNIT_MS);
	}
	pr_err("Continuing even though VMBus UNLOAD did not complete\n");

completed:
	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 *
	 * NB.  A malicious or compromised Hyper-V could send a spurious
	 * message of type CHANNELMSG_UNLOAD_RESPONSE, and trigger a call
	 * of the complete() below.  Make sure that unload_event has been
	 * initialized by the time this complete() is executed.
	 */
	complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
		return;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	reinit_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}

static void check_ready_for_resume_event(void)
{
	/*
	 * If all the old primary channels have been fixed up, then it's safe
	 * to resume.
	 */
	if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
		complete(&vmbus_connection.ready_for_resume_event);
}

static void vmbus_setup_channel_state(struct vmbus_channel *channel,
				      struct vmbus_channel_offer_channel *offer)
{
	/*
	 * Setup state for signalling the host.
	 */
	channel->sig_event = VMBUS_EVENT_CONNECTION_ID;

	channel->is_dedicated_interrupt =
			(offer->is_dedicated_interrupt != 0);
	channel->sig_event = offer->connection_id;

	memcpy(&channel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
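	/*
	 * Each monitor trigger group is 32 bits wide, so monitorid / 32
	 * selects the group and monitorid % 32 the bit within that group.
	 */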
	channel->monitor_grp = (u8)offer->monitorid / 32;
	channel->monitor_bit = (u8)offer->monitorid % 32;
	channel->device_id = hv_get_dev_type(channel);
}

/*
 * find_primary_channel_by_offer - Get the channel object given the new offer.
 * This is only used in the resume path of hibernation.
 */
static struct vmbus_channel *
find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
{
	struct vmbus_channel *channel = NULL, *iter;
	const guid_t *inst1, *inst2;

	/* Ignore sub-channel offers. */
	if (offer->offer.sub_channel_index != 0)
		return NULL;

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
		inst1 = &iter->offermsg.offer.if_instance;
		inst2 = &offer->offer.if_instance;

		if (guid_equal(inst1, inst2)) {
			channel = iter;
			break;
		}
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return channel;
}

static bool vmbus_is_valid_offer(const struct vmbus_channel_offer_channel *offer)
{
	const guid_t *guid = &offer->offer.if_type;
	u16 i;

	if (!hv_is_isolation_supported())
		return true;

	if (is_hvsock_offer(offer))
		return true;

	for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
		if (guid_equal(guid, &vmbus_devs[i].guid))
			return vmbus_devs[i].allowed_in_isolated;
	}
	return false;
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *oldchannel, *newchannel;
	size_t offer_sz;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	trace_vmbus_onoffer(offer);

	if (!vmbus_is_valid_offer(offer)) {
		pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
				   offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		return;
	}

	oldchannel = find_primary_channel_by_offer(offer);

	if (oldchannel != NULL) {
		/*
		 * We're resuming from hibernation: all the sub-channel and
		 * hv_sock channels we had before the hibernation should have
		 * been cleaned up, and now we must be seeing a re-offered
		 * primary channel that we had before the hibernation.
		 */

		/*
		 * { Initially: channel relid = INVALID_RELID,
		 *		channels[valid_relid] = NULL }
		 *
		 * CPU1					CPU2
		 *
		 * [vmbus_onoffer()]			[vmbus_device_release()]
		 *
		 * LOCK channel_mutex			LOCK channel_mutex
		 * STORE channel relid = valid_relid	LOAD r1 = channel relid
		 * MAP_RELID channel			if (r1 != INVALID_RELID)
		 * UNLOCK channel_mutex			  UNMAP_RELID channel
		 *					UNLOCK channel_mutex
		 *
		 * Forbids: r1 == valid_relid &&
		 *		channels[valid_relid] == channel
		 *
		 * Note.  r1 can be INVALID_RELID only for an hv_sock channel.
		 * None of the hv_sock channels which were present before the
		 * suspend are re-offered upon the resume.  See the WARN_ON()
		 * in hv_process_channel_removal().
		 */
		mutex_lock(&vmbus_connection.channel_mutex);

		atomic_dec(&vmbus_connection.offer_in_progress);

		WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
		/* Fix up the relid. */
		oldchannel->offermsg.child_relid = offer->child_relid;

		offer_sz = sizeof(*offer);
		if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
			/*
			 * This is not an error, since the host can also change
			 * the other field(s) of the offer, e.g. on WS RS5
			 * (Build 17763), the offer->connection_id of the
			 * Mellanox VF vmbus device can change when the host
			 * reoffers the device upon resume.
			 */
			pr_debug("vmbus offer changed: relid=%d\n",
				 offer->child_relid);

			print_hex_dump_debug("Old vmbus offer: ",
					     DUMP_PREFIX_OFFSET, 16, 4,
					     &oldchannel->offermsg, offer_sz,
					     false);
			print_hex_dump_debug("New vmbus offer: ",
					     DUMP_PREFIX_OFFSET, 16, 4,
					     offer, offer_sz, false);

			/* Fix up the old channel. */
			vmbus_setup_channel_state(oldchannel, offer);
		}

		/* Add the channel back to the array of channels. */
		vmbus_channel_map_relid(oldchannel);
		check_ready_for_resume_event();

		mutex_unlock(&vmbus_connection.channel_mutex);
		return;
	}

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	vmbus_setup_channel_state(newchannel, offer);

	vmbus_process_offer(newchannel);
}

static void check_ready_for_suspend_event(void)
{
	/*
	 * If all the sub-channels or hv_sock channels have been cleaned up,
	 * then it's safe to suspend.
	 */
	if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
		complete(&vmbus_connection.ready_for_suspend_event);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct device *dev;
	bool clean_up_chan_for_suspend;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	trace_vmbus_onoffer_rescind(rescind);

	/*
	 * The offer msg and the offer_rescind msg come from the same
	 * serialized work queue, but an earlier offer may still be in the
	 * middle of being handled: vmbus_onoffer() increments
	 * offer_in_progress and defers the bulk of the work to
	 * vmbus_process_offer() and the add-channel workqueues.  Wait here
	 * until no offer is in progress, so that relid2channel() below sees
	 * a consistent channel list.
	 */
	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until any channel offer is currently
		 * being processed.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);
	if (channel != NULL) {
		/*
		 * Guarantee that no other instance of vmbus_onoffer_rescind()
		 * has got a reference to the channel object.  Synchronize on
		 * &vmbus_connection.channel_mutex.
		 */
		if (channel->rescind_ref) {
			mutex_unlock(&vmbus_connection.channel_mutex);
			return;
		}
		channel->rescind_ref = true;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel == NULL) {
		/*
		 * We failed in processing the offer message;
		 * we would have cleaned up the relid in that
		 * failure path.
		 */
		return;
	}

	clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
				    is_sub_channel(channel);
	/*
	 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
	 * should make sure the channel callback is not running any more.
	 */
	vmbus_reset_channel_cb(channel);

	/*
	 * Now wait for offer handling to complete.
	 */
	vmbus_rescind_cleanup(channel);
	while (READ_ONCE(channel->probe_done) == false) {
		/*
		 * We wait here until any channel offer is currently
		 * being processed.
		 */
		msleep(1);
	}

	/*
	 * At this point, the rescind handling can proceed safely.
	 */

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);

			if (clean_up_chan_for_suspend)
				check_ready_for_suspend_event();

			return;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else if (channel->primary_channel != NULL) {
		/*
		 * Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driver (refer to
		 * vmbus_close() for details):
		 * 1. Close all sub-channels first
		 * 2. Then close the primary channel.
		 */
		mutex_lock(&vmbus_connection.channel_mutex);
		if (channel->state == CHANNEL_OPEN_STATE) {
			/*
			 * The channel is currently not open;
			 * it is safe for us to cleanup the channel.
			 */
			hv_process_channel_removal(channel);
		} else {
			complete(&channel->rescind_event);
		}
		mutex_unlock(&vmbus_connection.channel_mutex);
	}

	/* The "channel" may have been freed. Do not access it any longer. */

	if (clean_up_chan_for_suspend)
		check_ready_for_suspend_event();
}

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	BUG_ON(!is_hvsock_channel(channel));

	/* We always get a rescind msg when a connection is closed. */
	while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
		msleep(1);

	vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);

/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}

/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we received a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	trace_vmbus_onopen_result(result);

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	trace_vmbus_ongpadl_created(gpadlcreated);

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onmodifychannel_response - Modify Channel response handler.
 *
 * This is invoked when we received a response to our channel modify request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onmodifychannel_response(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_modifychannel_response *response;
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	response = (struct vmbus_channel_modifychannel_response *)hdr;

	trace_vmbus_onmodifychannel_response(response);

	/*
	 * Find the modify msg, copy the response and signal/unblock the wait
	 * event.
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) {
		struct vmbus_channel_message_header *responseheader =
				(struct vmbus_channel_message_header *)msginfo->msg;

		if (responseheader->msgtype == CHANNELMSG_MODIFYCHANNEL) {
			struct vmbus_channel_modifychannel *modifymsg;

			modifymsg = (struct vmbus_channel_modifychannel *)msginfo->msg;
			if (modifymsg->child_relid == response->child_relid) {
				memcpy(&msginfo->response.modify_response, response,
				       sizeof(*response));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	trace_vmbus_ongpadl_torndown(gpadl_torndown);

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;

	trace_vmbus_onversion_response(version_response);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/* Channel message dispatch table */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
	{ CHANNELMSG_INVALID,			0, NULL, 0},
	{ CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer,
		sizeof(struct vmbus_channel_offer_channel)},
	{ CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind,
		sizeof(struct vmbus_channel_rescind_offer) },
	{ CHANNELMSG_REQUESTOFFERS,		0, NULL, 0},
	{ CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered, 0},
	{ CHANNELMSG_OPENCHANNEL,		0, NULL, 0},
	{ CHANNELMSG_OPENCHANNEL_RESULT,	1, vmbus_onopen_result,
		sizeof(struct vmbus_channel_open_result)},
	{ CHANNELMSG_CLOSECHANNEL,		0, NULL, 0},
	{ CHANNELMSG_GPADL_HEADER,		0, NULL, 0},
	{ CHANNELMSG_GPADL_BODY,		0, NULL, 0},
	{ CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created,
		sizeof(struct vmbus_channel_gpadl_created)},
	{ CHANNELMSG_GPADL_TEARDOWN,		0, NULL, 0},
	{ CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown,
		sizeof(struct vmbus_channel_gpadl_torndown) },
	{ CHANNELMSG_RELID_RELEASED,		0, NULL, 0},
	{ CHANNELMSG_INITIATE_CONTACT,		0, NULL, 0},
	{ CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response,
		sizeof(struct vmbus_channel_version_response)},
	{ CHANNELMSG_UNLOAD,			0, NULL, 0},
	{ CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response, 0},
	{ CHANNELMSG_18,			0, NULL, 0},
	{ CHANNELMSG_19,			0, NULL, 0},
	{ CHANNELMSG_20,			0, NULL, 0},
	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL, 0},
	{ CHANNELMSG_MODIFYCHANNEL,		0, NULL, 0},
	{ CHANNELMSG_TL_CONNECT_RESULT,		0, NULL, 0},
	{ CHANNELMSG_MODIFYCHANNEL_RESPONSE,	1, vmbus_onmodifychannel_response,
		sizeof(struct vmbus_channel_modifychannel_response)},
};
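
/*
 * Note: the table above is indexed directly by msgtype (see vmbus_onmessage()
 * below), so the entries must remain in CHANNELMSG_* order and the table must
 * cover every value up to CHANNELMSG_COUNT.
 */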

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
{
	trace_vmbus_on_message(hdr);

	/*
	 * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go out
	 * of bound and the message_handler pointer can not be NULL.
	 */
	channel_message_table[hdr->msgtype].message_handler(hdr);
}

/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kzalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);

	trace_vmbus_request_offers(ret);

	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);

		goto cleanup;
	}

cleanup:
	kfree(msginfo);

	return ret;
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
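
/*
 * Usage sketch (illustrative): a driver that wants per-sub-channel setup
 * registers its callback on the primary channel during probe, before
 * requesting sub-channels, e.g.:
 *
 *	vmbus_set_sc_create_callback(dev->channel, my_sc_open_callback);
 *
 * where my_sc_open_callback() is a driver-supplied function (the name is
 * hypothetical) that typically calls vmbus_open() on the new sub-channel.
 */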

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
	void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);