0001
0002
0003
0004
0005
0006
0007
0008
0009
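/*
 * Hyper-V VMbus bus driver: registers the "vmbus" bus type, exposes the
 * host's channel offers as child devices, and dispatches VMbus
 * interrupts and channel messages to their handlers.
 */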
0010 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0011
0012 #include <linux/init.h>
0013 #include <linux/module.h>
0014 #include <linux/device.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/sysctl.h>
0017 #include <linux/slab.h>
0018 #include <linux/acpi.h>
0019 #include <linux/completion.h>
0020 #include <linux/hyperv.h>
0021 #include <linux/kernel_stat.h>
0022 #include <linux/clockchips.h>
0023 #include <linux/cpu.h>
0024 #include <linux/sched/isolation.h>
0025 #include <linux/sched/task_stack.h>
0026
0027 #include <linux/delay.h>
0028 #include <linux/notifier.h>
0029 #include <linux/panic_notifier.h>
0030 #include <linux/ptrace.h>
0031 #include <linux/screen_info.h>
0032 #include <linux/kdebug.h>
0033 #include <linux/efi.h>
0034 #include <linux/random.h>
0035 #include <linux/kernel.h>
0036 #include <linux/syscore_ops.h>
0037 #include <linux/dma-map-ops.h>
0038 #include <linux/pci.h>
0039 #include <clocksource/hyperv_timer.h>
0040 #include "hyperv_vmbus.h"
0041
0042 struct vmbus_dynid {
0043 struct list_head node;
0044 struct hv_vmbus_device_id id;
0045 };
0046
0047 static struct acpi_device *hv_acpi_dev;
0048
0049 static struct completion probe_event;
0050
0051 static int hyperv_cpuhp_online;
0052
0053 static void *hv_panic_page;
0054
0055 static long __percpu *vmbus_evt;
0056
0057
0058 int vmbus_irq;
0059 int vmbus_interrupt;
0060
0061
0062
0063
0064
0065
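/*
 * Controls whether kernel message data is reported to Hyper-V on panic;
 * exposed below as the "hyperv_record_panic_msg" sysctl.
 */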
0066 static int sysctl_record_panic_msg = 1;
0067
0068 static int hyperv_report_reg(void)
0069 {
0070 return !sysctl_record_panic_msg || !hv_panic_page;
0071 }
0072
0073 static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
0074 void *args)
0075 {
0076 struct pt_regs *regs;
0077
0078 vmbus_initiate_unload(true);
0079
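/*
 * Notify the host through the crash MSRs only if the kmsg dumper below
 * will not do so later, i.e. when panic-message recording is disabled
 * or the panic page could not be allocated.
 */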
0080
0081
0082
0083
0084
0085 if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
0086 && hyperv_report_reg()) {
0087 regs = current_pt_regs();
0088 hyperv_report_panic(regs, val, false);
0089 }
0090 return NOTIFY_DONE;
0091 }
0092
0093 static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
0094 void *args)
0095 {
0096 struct die_args *die = args;
0097 struct pt_regs *regs = die->regs;
0098
0099
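/* Only report oops events to the hypervisor; ignore other die events. */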
0100 if (val != DIE_OOPS)
0101 return NOTIFY_DONE;
0102
0103
0104
0105
0106
0107
0108 if (hyperv_report_reg())
0109 hyperv_report_panic(regs, val, true);
0110 return NOTIFY_DONE;
0111 }
0112
0113 static struct notifier_block hyperv_die_block = {
0114 .notifier_call = hyperv_die_event,
0115 };
0116 static struct notifier_block hyperv_panic_block = {
0117 .notifier_call = hyperv_panic_event,
0118 };
0119
0120 static const char *fb_mmio_name = "fb_range";
0121 static struct resource *fb_mmio;
0122 static struct resource *hyperv_mmio;
0123 static DEFINE_MUTEX(hyperv_mmio_lock);
0124
0125 static int vmbus_exists(void)
0126 {
0127 if (hv_acpi_dev == NULL)
0128 return -ENODEV;
0129
0130 return 0;
0131 }
0132
0133 static u8 channel_monitor_group(const struct vmbus_channel *channel)
0134 {
0135 return (u8)channel->offermsg.monitorid / 32;
0136 }
0137
0138 static u8 channel_monitor_offset(const struct vmbus_channel *channel)
0139 {
0140 return (u8)channel->offermsg.monitorid % 32;
0141 }
0142
0143 static u32 channel_pending(const struct vmbus_channel *channel,
0144 const struct hv_monitor_page *monitor_page)
0145 {
0146 u8 monitor_group = channel_monitor_group(channel);
0147
0148 return monitor_page->trigger_group[monitor_group].pending;
0149 }
0150
0151 static u32 channel_latency(const struct vmbus_channel *channel,
0152 const struct hv_monitor_page *monitor_page)
0153 {
0154 u8 monitor_group = channel_monitor_group(channel);
0155 u8 monitor_offset = channel_monitor_offset(channel);
0156
0157 return monitor_page->latency[monitor_group][monitor_offset];
0158 }
0159
0160 static u32 channel_conn_id(struct vmbus_channel *channel,
0161 struct hv_monitor_page *monitor_page)
0162 {
0163 u8 monitor_group = channel_monitor_group(channel);
0164 u8 monitor_offset = channel_monitor_offset(channel);
0165
0166 return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
0167 }
0168
0169 static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
0170 char *buf)
0171 {
0172 struct hv_device *hv_dev = device_to_hv_device(dev);
0173
0174 if (!hv_dev->channel)
0175 return -ENODEV;
0176 return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
0177 }
0178 static DEVICE_ATTR_RO(id);
0179
0180 static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
0181 char *buf)
0182 {
0183 struct hv_device *hv_dev = device_to_hv_device(dev);
0184
0185 if (!hv_dev->channel)
0186 return -ENODEV;
0187 return sprintf(buf, "%d\n", hv_dev->channel->state);
0188 }
0189 static DEVICE_ATTR_RO(state);
0190
0191 static ssize_t monitor_id_show(struct device *dev,
0192 struct device_attribute *dev_attr, char *buf)
0193 {
0194 struct hv_device *hv_dev = device_to_hv_device(dev);
0195
0196 if (!hv_dev->channel)
0197 return -ENODEV;
0198 return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
0199 }
0200 static DEVICE_ATTR_RO(monitor_id);
0201
0202 static ssize_t class_id_show(struct device *dev,
0203 struct device_attribute *dev_attr, char *buf)
0204 {
0205 struct hv_device *hv_dev = device_to_hv_device(dev);
0206
0207 if (!hv_dev->channel)
0208 return -ENODEV;
0209 return sprintf(buf, "{%pUl}\n",
0210 &hv_dev->channel->offermsg.offer.if_type);
0211 }
0212 static DEVICE_ATTR_RO(class_id);
0213
0214 static ssize_t device_id_show(struct device *dev,
0215 struct device_attribute *dev_attr, char *buf)
0216 {
0217 struct hv_device *hv_dev = device_to_hv_device(dev);
0218
0219 if (!hv_dev->channel)
0220 return -ENODEV;
0221 return sprintf(buf, "{%pUl}\n",
0222 &hv_dev->channel->offermsg.offer.if_instance);
0223 }
0224 static DEVICE_ATTR_RO(device_id);
0225
0226 static ssize_t modalias_show(struct device *dev,
0227 struct device_attribute *dev_attr, char *buf)
0228 {
0229 struct hv_device *hv_dev = device_to_hv_device(dev);
0230
0231 return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
0232 }
0233 static DEVICE_ATTR_RO(modalias);
0234
0235 #ifdef CONFIG_NUMA
0236 static ssize_t numa_node_show(struct device *dev,
0237 struct device_attribute *attr, char *buf)
0238 {
0239 struct hv_device *hv_dev = device_to_hv_device(dev);
0240
0241 if (!hv_dev->channel)
0242 return -ENODEV;
0243
0244 return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
0245 }
0246 static DEVICE_ATTR_RO(numa_node);
0247 #endif
0248
0249 static ssize_t server_monitor_pending_show(struct device *dev,
0250 struct device_attribute *dev_attr,
0251 char *buf)
0252 {
0253 struct hv_device *hv_dev = device_to_hv_device(dev);
0254
0255 if (!hv_dev->channel)
0256 return -ENODEV;
0257 return sprintf(buf, "%d\n",
0258 channel_pending(hv_dev->channel,
0259 vmbus_connection.monitor_pages[0]));
0260 }
0261 static DEVICE_ATTR_RO(server_monitor_pending);
0262
0263 static ssize_t client_monitor_pending_show(struct device *dev,
0264 struct device_attribute *dev_attr,
0265 char *buf)
0266 {
0267 struct hv_device *hv_dev = device_to_hv_device(dev);
0268
0269 if (!hv_dev->channel)
0270 return -ENODEV;
0271 return sprintf(buf, "%d\n",
0272 channel_pending(hv_dev->channel,
0273 vmbus_connection.monitor_pages[1]));
0274 }
0275 static DEVICE_ATTR_RO(client_monitor_pending);
0276
0277 static ssize_t server_monitor_latency_show(struct device *dev,
0278 struct device_attribute *dev_attr,
0279 char *buf)
0280 {
0281 struct hv_device *hv_dev = device_to_hv_device(dev);
0282
0283 if (!hv_dev->channel)
0284 return -ENODEV;
0285 return sprintf(buf, "%d\n",
0286 channel_latency(hv_dev->channel,
0287 vmbus_connection.monitor_pages[0]));
0288 }
0289 static DEVICE_ATTR_RO(server_monitor_latency);
0290
0291 static ssize_t client_monitor_latency_show(struct device *dev,
0292 struct device_attribute *dev_attr,
0293 char *buf)
0294 {
0295 struct hv_device *hv_dev = device_to_hv_device(dev);
0296
0297 if (!hv_dev->channel)
0298 return -ENODEV;
0299 return sprintf(buf, "%d\n",
0300 channel_latency(hv_dev->channel,
0301 vmbus_connection.monitor_pages[1]));
0302 }
0303 static DEVICE_ATTR_RO(client_monitor_latency);
0304
0305 static ssize_t server_monitor_conn_id_show(struct device *dev,
0306 struct device_attribute *dev_attr,
0307 char *buf)
0308 {
0309 struct hv_device *hv_dev = device_to_hv_device(dev);
0310
0311 if (!hv_dev->channel)
0312 return -ENODEV;
0313 return sprintf(buf, "%d\n",
0314 channel_conn_id(hv_dev->channel,
0315 vmbus_connection.monitor_pages[0]));
0316 }
0317 static DEVICE_ATTR_RO(server_monitor_conn_id);
0318
0319 static ssize_t client_monitor_conn_id_show(struct device *dev,
0320 struct device_attribute *dev_attr,
0321 char *buf)
0322 {
0323 struct hv_device *hv_dev = device_to_hv_device(dev);
0324
0325 if (!hv_dev->channel)
0326 return -ENODEV;
0327 return sprintf(buf, "%d\n",
0328 channel_conn_id(hv_dev->channel,
0329 vmbus_connection.monitor_pages[1]));
0330 }
0331 static DEVICE_ATTR_RO(client_monitor_conn_id);
0332
0333 static ssize_t out_intr_mask_show(struct device *dev,
0334 struct device_attribute *dev_attr, char *buf)
0335 {
0336 struct hv_device *hv_dev = device_to_hv_device(dev);
0337 struct hv_ring_buffer_debug_info outbound;
0338 int ret;
0339
0340 if (!hv_dev->channel)
0341 return -ENODEV;
0342
0343 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
0344 &outbound);
0345 if (ret < 0)
0346 return ret;
0347
0348 return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
0349 }
0350 static DEVICE_ATTR_RO(out_intr_mask);
0351
0352 static ssize_t out_read_index_show(struct device *dev,
0353 struct device_attribute *dev_attr, char *buf)
0354 {
0355 struct hv_device *hv_dev = device_to_hv_device(dev);
0356 struct hv_ring_buffer_debug_info outbound;
0357 int ret;
0358
0359 if (!hv_dev->channel)
0360 return -ENODEV;
0361
0362 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
0363 &outbound);
0364 if (ret < 0)
0365 return ret;
0366 return sprintf(buf, "%d\n", outbound.current_read_index);
0367 }
0368 static DEVICE_ATTR_RO(out_read_index);
0369
0370 static ssize_t out_write_index_show(struct device *dev,
0371 struct device_attribute *dev_attr,
0372 char *buf)
0373 {
0374 struct hv_device *hv_dev = device_to_hv_device(dev);
0375 struct hv_ring_buffer_debug_info outbound;
0376 int ret;
0377
0378 if (!hv_dev->channel)
0379 return -ENODEV;
0380
0381 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
0382 &outbound);
0383 if (ret < 0)
0384 return ret;
0385 return sprintf(buf, "%d\n", outbound.current_write_index);
0386 }
0387 static DEVICE_ATTR_RO(out_write_index);
0388
0389 static ssize_t out_read_bytes_avail_show(struct device *dev,
0390 struct device_attribute *dev_attr,
0391 char *buf)
0392 {
0393 struct hv_device *hv_dev = device_to_hv_device(dev);
0394 struct hv_ring_buffer_debug_info outbound;
0395 int ret;
0396
0397 if (!hv_dev->channel)
0398 return -ENODEV;
0399
0400 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
0401 &outbound);
0402 if (ret < 0)
0403 return ret;
0404 return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
0405 }
0406 static DEVICE_ATTR_RO(out_read_bytes_avail);
0407
0408 static ssize_t out_write_bytes_avail_show(struct device *dev,
0409 struct device_attribute *dev_attr,
0410 char *buf)
0411 {
0412 struct hv_device *hv_dev = device_to_hv_device(dev);
0413 struct hv_ring_buffer_debug_info outbound;
0414 int ret;
0415
0416 if (!hv_dev->channel)
0417 return -ENODEV;
0418
0419 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
0420 &outbound);
0421 if (ret < 0)
0422 return ret;
0423 return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
0424 }
0425 static DEVICE_ATTR_RO(out_write_bytes_avail);
0426
0427 static ssize_t in_intr_mask_show(struct device *dev,
0428 struct device_attribute *dev_attr, char *buf)
0429 {
0430 struct hv_device *hv_dev = device_to_hv_device(dev);
0431 struct hv_ring_buffer_debug_info inbound;
0432 int ret;
0433
0434 if (!hv_dev->channel)
0435 return -ENODEV;
0436
0437 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
0438 if (ret < 0)
0439 return ret;
0440
0441 return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
0442 }
0443 static DEVICE_ATTR_RO(in_intr_mask);
0444
0445 static ssize_t in_read_index_show(struct device *dev,
0446 struct device_attribute *dev_attr, char *buf)
0447 {
0448 struct hv_device *hv_dev = device_to_hv_device(dev);
0449 struct hv_ring_buffer_debug_info inbound;
0450 int ret;
0451
0452 if (!hv_dev->channel)
0453 return -ENODEV;
0454
0455 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
0456 if (ret < 0)
0457 return ret;
0458
0459 return sprintf(buf, "%d\n", inbound.current_read_index);
0460 }
0461 static DEVICE_ATTR_RO(in_read_index);
0462
0463 static ssize_t in_write_index_show(struct device *dev,
0464 struct device_attribute *dev_attr, char *buf)
0465 {
0466 struct hv_device *hv_dev = device_to_hv_device(dev);
0467 struct hv_ring_buffer_debug_info inbound;
0468 int ret;
0469
0470 if (!hv_dev->channel)
0471 return -ENODEV;
0472
0473 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
0474 if (ret < 0)
0475 return ret;
0476
0477 return sprintf(buf, "%d\n", inbound.current_write_index);
0478 }
0479 static DEVICE_ATTR_RO(in_write_index);
0480
0481 static ssize_t in_read_bytes_avail_show(struct device *dev,
0482 struct device_attribute *dev_attr,
0483 char *buf)
0484 {
0485 struct hv_device *hv_dev = device_to_hv_device(dev);
0486 struct hv_ring_buffer_debug_info inbound;
0487 int ret;
0488
0489 if (!hv_dev->channel)
0490 return -ENODEV;
0491
0492 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
0493 if (ret < 0)
0494 return ret;
0495
0496 return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
0497 }
0498 static DEVICE_ATTR_RO(in_read_bytes_avail);
0499
0500 static ssize_t in_write_bytes_avail_show(struct device *dev,
0501 struct device_attribute *dev_attr,
0502 char *buf)
0503 {
0504 struct hv_device *hv_dev = device_to_hv_device(dev);
0505 struct hv_ring_buffer_debug_info inbound;
0506 int ret;
0507
0508 if (!hv_dev->channel)
0509 return -ENODEV;
0510
0511 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
0512 if (ret < 0)
0513 return ret;
0514
0515 return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
0516 }
0517 static DEVICE_ATTR_RO(in_write_bytes_avail);
0518
0519 static ssize_t channel_vp_mapping_show(struct device *dev,
0520 struct device_attribute *dev_attr,
0521 char *buf)
0522 {
0523 struct hv_device *hv_dev = device_to_hv_device(dev);
0524 struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
0525 int buf_size = PAGE_SIZE, n_written, tot_written;
0526 struct list_head *cur;
0527
0528 if (!channel)
0529 return -ENODEV;
0530
0531 mutex_lock(&vmbus_connection.channel_mutex);
0532
0533 tot_written = snprintf(buf, buf_size, "%u:%u\n",
0534 channel->offermsg.child_relid, channel->target_cpu);
0535
0536 list_for_each(cur, &channel->sc_list) {
0537 if (tot_written >= buf_size - 1)
0538 break;
0539
0540 cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
0541 n_written = scnprintf(buf + tot_written,
0542 buf_size - tot_written,
0543 "%u:%u\n",
0544 cur_sc->offermsg.child_relid,
0545 cur_sc->target_cpu);
0546 tot_written += n_written;
0547 }
0548
0549 mutex_unlock(&vmbus_connection.channel_mutex);
0550
0551 return tot_written;
0552 }
0553 static DEVICE_ATTR_RO(channel_vp_mapping);
0554
0555 static ssize_t vendor_show(struct device *dev,
0556 struct device_attribute *dev_attr,
0557 char *buf)
0558 {
0559 struct hv_device *hv_dev = device_to_hv_device(dev);
0560
0561 return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
0562 }
0563 static DEVICE_ATTR_RO(vendor);
0564
0565 static ssize_t device_show(struct device *dev,
0566 struct device_attribute *dev_attr,
0567 char *buf)
0568 {
0569 struct hv_device *hv_dev = device_to_hv_device(dev);
0570
0571 return sprintf(buf, "0x%x\n", hv_dev->device_id);
0572 }
0573 static DEVICE_ATTR_RO(device);
0574
0575 static ssize_t driver_override_store(struct device *dev,
0576 struct device_attribute *attr,
0577 const char *buf, size_t count)
0578 {
0579 struct hv_device *hv_dev = device_to_hv_device(dev);
0580 int ret;
0581
0582 ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
0583 if (ret)
0584 return ret;
0585
0586 return count;
0587 }
0588
0589 static ssize_t driver_override_show(struct device *dev,
0590 struct device_attribute *attr, char *buf)
0591 {
0592 struct hv_device *hv_dev = device_to_hv_device(dev);
0593 ssize_t len;
0594
0595 device_lock(dev);
0596 len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
0597 device_unlock(dev);
0598
0599 return len;
0600 }
0601 static DEVICE_ATTR_RW(driver_override);
0602
0603
0604 static struct attribute *vmbus_dev_attrs[] = {
0605 &dev_attr_id.attr,
0606 &dev_attr_state.attr,
0607 &dev_attr_monitor_id.attr,
0608 &dev_attr_class_id.attr,
0609 &dev_attr_device_id.attr,
0610 &dev_attr_modalias.attr,
0611 #ifdef CONFIG_NUMA
0612 &dev_attr_numa_node.attr,
0613 #endif
0614 &dev_attr_server_monitor_pending.attr,
0615 &dev_attr_client_monitor_pending.attr,
0616 &dev_attr_server_monitor_latency.attr,
0617 &dev_attr_client_monitor_latency.attr,
0618 &dev_attr_server_monitor_conn_id.attr,
0619 &dev_attr_client_monitor_conn_id.attr,
0620 &dev_attr_out_intr_mask.attr,
0621 &dev_attr_out_read_index.attr,
0622 &dev_attr_out_write_index.attr,
0623 &dev_attr_out_read_bytes_avail.attr,
0624 &dev_attr_out_write_bytes_avail.attr,
0625 &dev_attr_in_intr_mask.attr,
0626 &dev_attr_in_read_index.attr,
0627 &dev_attr_in_write_index.attr,
0628 &dev_attr_in_read_bytes_avail.attr,
0629 &dev_attr_in_write_bytes_avail.attr,
0630 &dev_attr_channel_vp_mapping.attr,
0631 &dev_attr_vendor.attr,
0632 &dev_attr_device.attr,
0633 &dev_attr_driver_override.attr,
0634 NULL,
0635 };
0636
0637
0638
0639
0640
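/*
 * Device-level attribute visibility callback: hide the monitor-related
 * attributes when the channel does not use the monitor mechanism.
 */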
0641 static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
0642 struct attribute *attr, int idx)
0643 {
0644 struct device *dev = kobj_to_dev(kobj);
0645 const struct hv_device *hv_dev = device_to_hv_device(dev);
0646
0647
0648 if (!hv_dev->channel->offermsg.monitor_allocated &&
0649 (attr == &dev_attr_monitor_id.attr ||
0650 attr == &dev_attr_server_monitor_pending.attr ||
0651 attr == &dev_attr_client_monitor_pending.attr ||
0652 attr == &dev_attr_server_monitor_latency.attr ||
0653 attr == &dev_attr_client_monitor_latency.attr ||
0654 attr == &dev_attr_server_monitor_conn_id.attr ||
0655 attr == &dev_attr_client_monitor_conn_id.attr))
0656 return 0;
0657
0658 return attr->mode;
0659 }
0660
0661 static const struct attribute_group vmbus_dev_group = {
0662 .attrs = vmbus_dev_attrs,
0663 .is_visible = vmbus_dev_attr_is_visible
0664 };
0665 __ATTRIBUTE_GROUPS(vmbus_dev);
0666
0667
0668 static ssize_t hibernation_show(struct bus_type *bus, char *buf)
0669 {
0670 return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
0671 }
0672
0673 static BUS_ATTR_RO(hibernation);
0674
0675 static struct attribute *vmbus_bus_attrs[] = {
0676 &bus_attr_hibernation.attr,
0677 NULL,
0678 };
0679 static const struct attribute_group vmbus_bus_group = {
0680 .attrs = vmbus_bus_attrs,
0681 };
0682 __ATTRIBUTE_GROUPS(vmbus_bus);
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
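/*
 * vmbus_uevent - emit a MODALIAS uevent of the form vmbus:<guid> so
 * that udev can load the matching driver when a device is added.
 */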
0695 static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
0696 {
0697 struct hv_device *dev = device_to_hv_device(device);
0698 const char *format = "MODALIAS=vmbus:%*phN";
0699
0700 return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
0701 }
0702
0703 static const struct hv_vmbus_device_id *
0704 hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
0705 {
0706 if (id == NULL)
0707 return NULL;
0708
0709 for (; !guid_is_null(&id->guid); id++)
0710 if (guid_equal(&id->guid, guid))
0711 return id;
0712
0713 return NULL;
0714 }
0715
0716 static const struct hv_vmbus_device_id *
0717 hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
0718 {
0719 const struct hv_vmbus_device_id *id = NULL;
0720 struct vmbus_dynid *dynid;
0721
0722 spin_lock(&drv->dynids.lock);
0723 list_for_each_entry(dynid, &drv->dynids.list, node) {
0724 if (guid_equal(&dynid->id.guid, guid)) {
0725 id = &dynid->id;
0726 break;
0727 }
0728 }
0729 spin_unlock(&drv->dynids.lock);
0730
0731 return id;
0732 }
0733
0734 static const struct hv_vmbus_device_id vmbus_device_null;
0735
0736
0737
0738
0739
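/*
 * Return the matching hv_vmbus_device_id for the device, consulting the
 * driver's dynamic ids before its static id_table; NULL if no match.
 */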
0740 static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
0741 struct hv_device *dev)
0742 {
0743 const guid_t *guid = &dev->dev_type;
0744 const struct hv_vmbus_device_id *id;
0745
0746
0747 if (dev->driver_override && strcmp(dev->driver_override, drv->name))
0748 return NULL;
0749
0750
0751 id = hv_vmbus_dynid_match(drv, guid);
0752 if (!id)
0753 id = hv_vmbus_dev_match(drv->id_table, guid);
0754
0755
0756 if (!id && dev->driver_override)
0757 id = &vmbus_device_null;
0758
0759 return id;
0760 }
0761
0762
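/* vmbus_add_dynid - add a new device id (GUID) to the driver at runtime */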
0763 static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
0764 {
0765 struct vmbus_dynid *dynid;
0766
0767 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
0768 if (!dynid)
0769 return -ENOMEM;
0770
0771 dynid->id.guid = *guid;
0772
0773 spin_lock(&drv->dynids.lock);
0774 list_add_tail(&dynid->node, &drv->dynids.list);
0775 spin_unlock(&drv->dynids.lock);
0776
0777 return driver_attach(&drv->driver);
0778 }
0779
0780 static void vmbus_free_dynids(struct hv_driver *drv)
0781 {
0782 struct vmbus_dynid *dynid, *n;
0783
0784 spin_lock(&drv->dynids.lock);
0785 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
0786 list_del(&dynid->node);
0787 kfree(dynid);
0788 }
0789 spin_unlock(&drv->dynids.lock);
0790 }
0791
0792
0793
0794
0795
0796
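/*
 * new_id_store - sysfs frontend to vmbus_add_dynid(): lets additional
 * GUIDs be bound to an existing driver at runtime.
 */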
0797 static ssize_t new_id_store(struct device_driver *driver, const char *buf,
0798 size_t count)
0799 {
0800 struct hv_driver *drv = drv_to_hv_drv(driver);
0801 guid_t guid;
0802 ssize_t retval;
0803
0804 retval = guid_parse(buf, &guid);
0805 if (retval)
0806 return retval;
0807
0808 if (hv_vmbus_dynid_match(drv, &guid))
0809 return -EEXIST;
0810
0811 retval = vmbus_add_dynid(drv, &guid);
0812 if (retval)
0813 return retval;
0814 return count;
0815 }
0816 static DRIVER_ATTR_WO(new_id);
0817
0818
0819
0820
0821
0822
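/*
 * remove_id_store - remove a dynamically added device id (GUID) from
 * the driver.
 */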
0823 static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
0824 size_t count)
0825 {
0826 struct hv_driver *drv = drv_to_hv_drv(driver);
0827 struct vmbus_dynid *dynid, *n;
0828 guid_t guid;
0829 ssize_t retval;
0830
0831 retval = guid_parse(buf, &guid);
0832 if (retval)
0833 return retval;
0834
0835 retval = -ENODEV;
0836 spin_lock(&drv->dynids.lock);
0837 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
0838 struct hv_vmbus_device_id *id = &dynid->id;
0839
0840 if (guid_equal(&id->guid, &guid)) {
0841 list_del(&dynid->node);
0842 kfree(dynid);
0843 retval = count;
0844 break;
0845 }
0846 }
0847 spin_unlock(&drv->dynids.lock);
0848
0849 return retval;
0850 }
0851 static DRIVER_ATTR_WO(remove_id);
0852
0853 static struct attribute *vmbus_drv_attrs[] = {
0854 &driver_attr_new_id.attr,
0855 &driver_attr_remove_id.attr,
0856 NULL,
0857 };
0858 ATTRIBUTE_GROUPS(vmbus_drv);
0859
0860
0861
0862
0863
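/* vmbus_match - attempt to match the device to the driver */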
0864 static int vmbus_match(struct device *device, struct device_driver *driver)
0865 {
0866 struct hv_driver *drv = drv_to_hv_drv(driver);
0867 struct hv_device *hv_dev = device_to_hv_device(device);
0868
0869
0870 if (is_hvsock_channel(hv_dev->channel))
0871 return drv->hvsock;
0872
0873 if (hv_vmbus_get_id(drv, hv_dev))
0874 return 1;
0875
0876 return 0;
0877 }
0878
0879
0880
0881
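/* vmbus_probe - invoke the matched driver's probe() for the new child device */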
0882 static int vmbus_probe(struct device *child_device)
0883 {
0884 int ret = 0;
0885 struct hv_driver *drv =
0886 drv_to_hv_drv(child_device->driver);
0887 struct hv_device *dev = device_to_hv_device(child_device);
0888 const struct hv_vmbus_device_id *dev_id;
0889
0890 dev_id = hv_vmbus_get_id(drv, dev);
0891 if (drv->probe) {
0892 ret = drv->probe(dev, dev_id);
0893 if (ret != 0)
0894 pr_err("probe failed for device %s (%d)\n",
0895 dev_name(child_device), ret);
0896
0897 } else {
0898 pr_err("probe not set for driver %s\n",
0899 dev_name(child_device));
0900 ret = -ENODEV;
0901 }
0902 return ret;
0903 }
0904
0905
0906
0907
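/*
 * vmbus_dma_configure - propagate the DMA coherence setting of the
 * top-level VMbus ACPI device to the child device being added.
 */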
0908 static int vmbus_dma_configure(struct device *child_device)
0909 {
0910
0911
0912
0913
0914
0915 hv_setup_dma_ops(child_device,
0916 device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
0917 return 0;
0918 }
0919
0920
0921
0922
0923 static void vmbus_remove(struct device *child_device)
0924 {
0925 struct hv_driver *drv;
0926 struct hv_device *dev = device_to_hv_device(child_device);
0927
0928 if (child_device->driver) {
0929 drv = drv_to_hv_drv(child_device->driver);
0930 if (drv->remove)
0931 drv->remove(dev);
0932 }
0933 }
0934
0935
0936
0937
0938 static void vmbus_shutdown(struct device *child_device)
0939 {
0940 struct hv_driver *drv;
0941 struct hv_device *dev = device_to_hv_device(child_device);
0942
0943
0944
0945 if (!child_device->driver)
0946 return;
0947
0948 drv = drv_to_hv_drv(child_device->driver);
0949
0950 if (drv->shutdown)
0951 drv->shutdown(dev);
0952 }
0953
0954 #ifdef CONFIG_PM_SLEEP
0955
0956
0957
0958 static int vmbus_suspend(struct device *child_device)
0959 {
0960 struct hv_driver *drv;
0961 struct hv_device *dev = device_to_hv_device(child_device);
0962
0963
0964 if (!child_device->driver)
0965 return 0;
0966
0967 drv = drv_to_hv_drv(child_device->driver);
0968 if (!drv->suspend)
0969 return -EOPNOTSUPP;
0970
0971 return drv->suspend(dev);
0972 }
0973
0974
0975
0976
0977 static int vmbus_resume(struct device *child_device)
0978 {
0979 struct hv_driver *drv;
0980 struct hv_device *dev = device_to_hv_device(child_device);
0981
0982
0983 if (!child_device->driver)
0984 return 0;
0985
0986 drv = drv_to_hv_drv(child_device->driver);
0987 if (!drv->resume)
0988 return -EOPNOTSUPP;
0989
0990 return drv->resume(dev);
0991 }
0992 #else
0993 #define vmbus_suspend NULL
0994 #define vmbus_resume NULL
0995 #endif
0996
0997
0998
0999
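/*
 * vmbus_device_release - final release callback for a child device:
 * remove the channel and free the hv_device.
 */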
1000 static void vmbus_device_release(struct device *device)
1001 {
1002 struct hv_device *hv_dev = device_to_hv_device(device);
1003 struct vmbus_channel *channel = hv_dev->channel;
1004
1005 hv_debug_rm_dev_dir(hv_dev);
1006
1007 mutex_lock(&vmbus_connection.channel_mutex);
1008 hv_process_channel_removal(channel);
1009 mutex_unlock(&vmbus_connection.channel_mutex);
1010 kfree(hv_dev);
1011 }
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
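/*
 * Only the hibernation-related noirq callbacks are populated here:
 * freeze/thaw/poweroff/restore are handled through vmbus_suspend() and
 * vmbus_resume(), while ordinary suspend/resume is left NULL
 * (presumably because only hibernation is supported on this bus).
 */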
1023 static const struct dev_pm_ops vmbus_pm = {
1024 .suspend_noirq = NULL,
1025 .resume_noirq = NULL,
1026 .freeze_noirq = vmbus_suspend,
1027 .thaw_noirq = vmbus_resume,
1028 .poweroff_noirq = vmbus_suspend,
1029 .restore_noirq = vmbus_resume,
1030 };
1031
1032
1033 static struct bus_type hv_bus = {
1034 .name = "vmbus",
1035 .match = vmbus_match,
1036 .shutdown = vmbus_shutdown,
1037 .remove = vmbus_remove,
1038 .probe = vmbus_probe,
1039 .uevent = vmbus_uevent,
1040 .dma_configure = vmbus_dma_configure,
1041 .dev_groups = vmbus_dev_groups,
1042 .drv_groups = vmbus_drv_groups,
1043 .bus_groups = vmbus_bus_groups,
1044 .pm = &vmbus_pm,
1045 };
1046
1047 struct onmessage_work_context {
1048 struct work_struct work;
1049 struct {
1050 struct hv_message_header header;
1051 u8 payload[];
1052 } msg;
1053 };
1054
1055 static void vmbus_onmessage_work(struct work_struct *work)
1056 {
1057 struct onmessage_work_context *ctx;
1058
1059
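/* Do not process messages once the connection has been torn down. */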
1060 if (vmbus_connection.conn_state == DISCONNECTED)
1061 return;
1062
1063 ctx = container_of(work, struct onmessage_work_context,
1064 work);
1065 vmbus_onmessage((struct vmbus_channel_message_header *)
1066 &ctx->msg.payload);
1067 kfree(ctx);
1068 }
1069
1070 void vmbus_on_msg_dpc(unsigned long data)
1071 {
1072 struct hv_per_cpu_context *hv_cpu = (void *)data;
1073 void *page_addr = hv_cpu->synic_message_page;
1074 struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
1075 VMBUS_MESSAGE_SINT;
1076 struct vmbus_channel_message_header *hdr;
1077 enum vmbus_channel_message_type msgtype;
1078 const struct vmbus_channel_message_table_entry *entry;
1079 struct onmessage_work_context *ctx;
1080 __u8 payload_size;
1081 u32 message_type;
1082
1083
1084
1085
1086
1087
1088 BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
1089
1090
1091
1092
1093
1094
1095
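/*
 * The SynIC message page is shared with the host; copy the message to a
 * local buffer so its fields cannot change while they are validated and
 * processed below.
 */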
1096 memcpy(&msg_copy, msg, sizeof(struct hv_message));
1097
1098 message_type = msg_copy.header.message_type;
1099 if (message_type == HVMSG_NONE)
1100
1101 return;
1102
1103 hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
1104 msgtype = hdr->msgtype;
1105
1106 trace_vmbus_on_msg_dpc(hdr);
1107
1108 if (msgtype >= CHANNELMSG_COUNT) {
1109 WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
1110 goto msg_handled;
1111 }
1112
1113 payload_size = msg_copy.header.payload_size;
1114 if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
1115 WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
1116 goto msg_handled;
1117 }
1118
1119 entry = &channel_message_table[msgtype];
1120
1121 if (!entry->message_handler)
1122 goto msg_handled;
1123
1124 if (payload_size < entry->min_payload_len) {
1125 WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
1126 goto msg_handled;
1127 }
1128
1129 if (entry->handler_type == VMHT_BLOCKING) {
1130 ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
1131 if (ctx == NULL)
1132 return;
1133
1134 INIT_WORK(&ctx->work, vmbus_onmessage_work);
1135 memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);
1136
1137
1138
1139
1140
1141
1142
1143
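/*
 * Offer and rescind messages for the same channel can race with each
 * other; ordering relies on offer_in_progress, the dedicated rescind
 * workqueue, and the channel mutex.
 */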
1144 switch (msgtype) {
1145 case CHANNELMSG_RESCIND_CHANNELOFFER:
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
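/*
 * Rescind messages go to their own workqueue so they are not queued
 * behind a blocked offer handler; drop them entirely while offer
 * messages are being ignored (e.g. during hibernation).
 */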
1164 if (vmbus_connection.ignore_any_offer_msg)
1165 break;
1166 queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
1167 break;
1168
1169 case CHANNELMSG_OFFERCHANNEL:
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
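/*
 * Count the offer as in progress so other code can wait for offer
 * handling to complete; drop the offer while offer messages are being
 * ignored (e.g. during hibernation).
 */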
1192 if (vmbus_connection.ignore_any_offer_msg)
1193 break;
1194 atomic_inc(&vmbus_connection.offer_in_progress);
1195 fallthrough;
1196
1197 default:
1198 queue_work(vmbus_connection.work_queue, &ctx->work);
1199 }
1200 } else
1201 entry->message_handler(hdr);
1202
1203 msg_handled:
1204 vmbus_signal_eom(msg, message_type);
1205 }
1206
1207 #ifdef CONFIG_PM_SLEEP
1208
1209
1210
1211
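/*
 * Queue a fake RESCIND_CHANNELOFFER message so that hv_sock channels,
 * which cannot persist across hibernation, are torn down on suspend.
 */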
1212 static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
1213 {
1214 struct onmessage_work_context *ctx;
1215 struct vmbus_channel_rescind_offer *rescind;
1216
1217 WARN_ON(!is_hvsock_channel(channel));
1218
1219
1220
1221
1222
1223 ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
1224 GFP_KERNEL | __GFP_NOFAIL);
1225
1226
1227
1228
1229
1230 ctx->msg.header.message_type = 1;
1231 ctx->msg.header.payload_size = sizeof(*rescind);
1232
1233
1234 rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
1235 rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
1236 rescind->child_relid = channel->offermsg.child_relid;
1237
1238 INIT_WORK(&ctx->work, vmbus_onmessage_work);
1239
1240 queue_work(vmbus_connection.work_queue, &ctx->work);
1241 }
1242 #endif
1243
1244
1245
1246
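/* Schedule all channels with events pending on this CPU. */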
1247 static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
1248 {
1249 unsigned long *recv_int_page;
1250 u32 maxbits, relid;
1251
1252
1253
1254
1255
1256 void *page_addr = hv_cpu->synic_event_page;
1257 union hv_synic_event_flags *event
1258 = (union hv_synic_event_flags *)page_addr +
1259 VMBUS_MESSAGE_SINT;
1260
1261 maxbits = HV_EVENT_FLAGS_COUNT;
1262 recv_int_page = event->flags;
1263
1264 if (unlikely(!recv_int_page))
1265 return;
1266
1267 for_each_set_bit(relid, recv_int_page, maxbits) {
1268 void (*callback_fn)(void *context);
1269 struct vmbus_channel *channel;
1270
1271 if (!sync_test_and_clear_bit(relid, recv_int_page))
1272 continue;
1273
1274
1275 if (relid == 0)
1276 continue;
1277
1278
1279
1280
1281
1282
1283
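/*
 * The RCU read-side critical section pairs with the kfree_rcu() in
 * vmbus_chan_release() and keeps the channel from being freed while it
 * is referenced below.
 */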
1284 rcu_read_lock();
1285
1286
1287 channel = relid2channel(relid);
1288 if (channel == NULL)
1289 goto sched_unlock_rcu;
1290
1291 if (channel->rescind)
1292 goto sched_unlock_rcu;
1293
1294
1295
1296
1297
1298
1299
1300
1301 spin_lock(&channel->sched_lock);
1302
1303 callback_fn = channel->onchannel_callback;
1304 if (unlikely(callback_fn == NULL))
1305 goto sched_unlock;
1306
1307 trace_vmbus_chan_sched(channel);
1308
1309 ++channel->interrupts;
1310
1311 switch (channel->callback_mode) {
1312 case HV_CALL_ISR:
1313 (*callback_fn)(channel->channel_callback_context);
1314 break;
1315
1316 case HV_CALL_BATCHED:
1317 hv_begin_read(&channel->inbound);
1318 fallthrough;
1319 case HV_CALL_DIRECT:
1320 tasklet_schedule(&channel->callback_event);
1321 }
1322
1323 sched_unlock:
1324 spin_unlock(&channel->sched_lock);
1325 sched_unlock_rcu:
1326 rcu_read_unlock();
1327 }
1328 }
1329
1330 static void vmbus_isr(void)
1331 {
1332 struct hv_per_cpu_context *hv_cpu
1333 = this_cpu_ptr(hv_context.cpu_context);
1334 void *page_addr;
1335 struct hv_message *msg;
1336
1337 vmbus_chan_sched(hv_cpu);
1338
1339 page_addr = hv_cpu->synic_message_page;
1340 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
1341
1342
1343 if (msg->header.message_type != HVMSG_NONE) {
1344 if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
1345 hv_stimer0_isr();
1346 vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
1347 } else
1348 tasklet_schedule(&hv_cpu->msg_dpc);
1349 }
1350
1351 add_interrupt_randomness(vmbus_interrupt);
1352 }
1353
1354 static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
1355 {
1356 vmbus_isr();
1357 return IRQ_HANDLED;
1358 }
1359
1360
1361
1362
1363
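/*
 * kmsg dump callback: on panic, copy as much of the kernel message
 * buffer as fits into the pre-allocated panic page and hand it to
 * Hyper-V through the crash registers.
 */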
1364 static void hv_kmsg_dump(struct kmsg_dumper *dumper,
1365 enum kmsg_dump_reason reason)
1366 {
1367 struct kmsg_dump_iter iter;
1368 size_t bytes_written;
1369
1370
1371 if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
1372 return;
1373
1374
1375
1376
1377
1378 kmsg_dump_rewind(&iter);
1379 kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
1380 &bytes_written);
1381 if (!bytes_written)
1382 return;
1383
1384
1385
1386
1387
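/*
 * P3 carries the physical address of the panic page and P4 the number
 * of bytes written to it; the remaining crash registers are cleared.
 */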
1388 hv_set_register(HV_REGISTER_CRASH_P0, 0);
1389 hv_set_register(HV_REGISTER_CRASH_P1, 0);
1390 hv_set_register(HV_REGISTER_CRASH_P2, 0);
1391 hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
1392 hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
1393
1394
1395
1396
1397
1398 hv_set_register(HV_REGISTER_CRASH_CTL,
1399 (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
1400 }
1401
1402 static struct kmsg_dumper hv_kmsg_dumper = {
1403 .dump = hv_kmsg_dump,
1404 };
1405
1406 static void hv_kmsg_dump_register(void)
1407 {
1408 int ret;
1409
1410 hv_panic_page = hv_alloc_hyperv_zeroed_page();
1411 if (!hv_panic_page) {
1412 pr_err("Hyper-V: panic message page memory allocation failed\n");
1413 return;
1414 }
1415
1416 ret = kmsg_dump_register(&hv_kmsg_dumper);
1417 if (ret) {
1418 pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
1419 hv_free_hyperv_page((unsigned long)hv_panic_page);
1420 hv_panic_page = NULL;
1421 }
1422 }
1423
1424 static struct ctl_table_header *hv_ctl_table_hdr;
1425
1426
1427
1428
1429
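/*
 * sysctl table that lets the user control whether kmsg data is reported
 * to Hyper-V on panic (kernel.hyperv_record_panic_msg).
 */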
1430 static struct ctl_table hv_ctl_table[] = {
1431 {
1432 .procname = "hyperv_record_panic_msg",
1433 .data = &sysctl_record_panic_msg,
1434 .maxlen = sizeof(int),
1435 .mode = 0644,
1436 .proc_handler = proc_dointvec_minmax,
1437 .extra1 = SYSCTL_ZERO,
1438 .extra2 = SYSCTL_ONE
1439 },
1440 {}
1441 };
1442
1443 static struct ctl_table hv_root_table[] = {
1444 {
1445 .procname = "kernel",
1446 .mode = 0555,
1447 .child = hv_ctl_table
1448 },
1449 {}
1450 };
1451
1452
1453
1454
1455
1456
1457
1458
1459
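/*
 * vmbus_bus_init - main VMbus initialization: initialize the hypervisor
 * interface, register the bus, set up the VMbus interrupt and per-CPU
 * SynIC state, connect to the host, and request channel offers.
 */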
1460 static int vmbus_bus_init(void)
1461 {
1462 int ret;
1463
1464 ret = hv_init();
1465 if (ret != 0) {
1466 pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
1467 return ret;
1468 }
1469
1470 ret = bus_register(&hv_bus);
1471 if (ret)
1472 return ret;
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
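/*
 * VMbus interrupts are best modeled as per-CPU interrupts. When the
 * architecture provides one (vmbus_irq != -1), request a per-CPU IRQ;
 * otherwise hook vmbus_isr() directly into the architecture's VMbus
 * interrupt path.
 */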
1483 if (vmbus_irq == -1) {
1484 hv_setup_vmbus_handler(vmbus_isr);
1485 } else {
1486 vmbus_evt = alloc_percpu(long);
1487 ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
1488 "Hyper-V VMbus", vmbus_evt);
1489 if (ret) {
1490 pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
1491 vmbus_irq, ret);
1492 free_percpu(vmbus_evt);
1493 goto err_setup;
1494 }
1495 }
1496
1497 ret = hv_synic_alloc();
1498 if (ret)
1499 goto err_alloc;
1500
1501
1502
1503
1504
1505 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
1506 hv_synic_init, hv_synic_cleanup);
1507 if (ret < 0)
1508 goto err_cpuhp;
1509 hyperv_cpuhp_online = ret;
1510
1511 ret = vmbus_connect();
1512 if (ret)
1513 goto err_connect;
1514
1515 if (hv_is_isolation_supported())
1516 sysctl_record_panic_msg = 0;
1517
1518
1519
1520
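/* Register panic/crash reporting only if the crash MSRs are available. */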
1521 if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
1522 u64 hyperv_crash_ctl;
1523
1524
1525
1526
1527
1528
1529
1530 hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
1531 if (!hv_ctl_table_hdr)
1532 pr_err("Hyper-V: sysctl table register error");
1533
1534
1535
1536
1537
1538 hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
1539 if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
1540 hv_kmsg_dump_register();
1541
1542 register_die_notifier(&hyperv_die_block);
1543 }
1544
1545
1546
1547
1548
1549
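/*
 * Always register the panic notifier so the VMbus connection can be
 * unloaded and further VMbus activity stopped when the VM panics.
 */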
1550 atomic_notifier_chain_register(&panic_notifier_list,
1551 &hyperv_panic_block);
1552
1553 vmbus_request_offers();
1554
1555 return 0;
1556
1557 err_connect:
1558 cpuhp_remove_state(hyperv_cpuhp_online);
1559 err_cpuhp:
1560 hv_synic_free();
1561 err_alloc:
1562 if (vmbus_irq == -1) {
1563 hv_remove_vmbus_handler();
1564 } else {
1565 free_percpu_irq(vmbus_irq, vmbus_evt);
1566 free_percpu(vmbus_evt);
1567 }
1568 err_setup:
1569 bus_unregister(&hv_bus);
1570 unregister_sysctl_table(hv_ctl_table_hdr);
1571 hv_ctl_table_hdr = NULL;
1572 return ret;
1573 }
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
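/*
 * __vmbus_driver_register() - register a VMbus driver with the driver
 * core and initialize its dynamic-id list; returns the result of
 * driver_register().
 */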
1586 int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
1587 {
1588 int ret;
1589
1590 pr_info("registering driver %s\n", hv_driver->name);
1591
1592 ret = vmbus_exists();
1593 if (ret < 0)
1594 return ret;
1595
1596 hv_driver->driver.name = hv_driver->name;
1597 hv_driver->driver.owner = owner;
1598 hv_driver->driver.mod_name = mod_name;
1599 hv_driver->driver.bus = &hv_bus;
1600
1601 spin_lock_init(&hv_driver->dynids.lock);
1602 INIT_LIST_HEAD(&hv_driver->dynids.list);
1603
1604 ret = driver_register(&hv_driver->driver);
1605
1606 return ret;
1607 }
1608 EXPORT_SYMBOL_GPL(__vmbus_driver_register);
1609
1610
1611
1612
1613
1614
1615
1616
1617
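/*
 * vmbus_driver_unregister() - unregister a driver previously registered
 * with __vmbus_driver_register() and free its dynamic ids.
 */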
1618 void vmbus_driver_unregister(struct hv_driver *hv_driver)
1619 {
1620 pr_info("unregistering driver %s\n", hv_driver->name);
1621
1622 if (!vmbus_exists()) {
1623 driver_unregister(&hv_driver->driver);
1624 vmbus_free_dynids(hv_driver);
1625 }
1626 }
1627 EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
1628
1629
1630
1631
1632
1633 static void vmbus_chan_release(struct kobject *kobj)
1634 {
1635 struct vmbus_channel *channel
1636 = container_of(kobj, struct vmbus_channel, kobj);
1637
1638 kfree_rcu(channel, rcu);
1639 }
1640
1641 struct vmbus_chan_attribute {
1642 struct attribute attr;
1643 ssize_t (*show)(struct vmbus_channel *chan, char *buf);
1644 ssize_t (*store)(struct vmbus_channel *chan,
1645 const char *buf, size_t count);
1646 };
1647 #define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
1648 struct vmbus_chan_attribute chan_attr_##_name \
1649 = __ATTR(_name, _mode, _show, _store)
1650 #define VMBUS_CHAN_ATTR_RW(_name) \
1651 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
1652 #define VMBUS_CHAN_ATTR_RO(_name) \
1653 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
1654 #define VMBUS_CHAN_ATTR_WO(_name) \
1655 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
1656
1657 static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1658 struct attribute *attr, char *buf)
1659 {
1660 const struct vmbus_chan_attribute *attribute
1661 = container_of(attr, struct vmbus_chan_attribute, attr);
1662 struct vmbus_channel *chan
1663 = container_of(kobj, struct vmbus_channel, kobj);
1664
1665 if (!attribute->show)
1666 return -EIO;
1667
1668 return attribute->show(chan, buf);
1669 }
1670
1671 static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
1672 struct attribute *attr, const char *buf,
1673 size_t count)
1674 {
1675 const struct vmbus_chan_attribute *attribute
1676 = container_of(attr, struct vmbus_chan_attribute, attr);
1677 struct vmbus_channel *chan
1678 = container_of(kobj, struct vmbus_channel, kobj);
1679
1680 if (!attribute->store)
1681 return -EIO;
1682
1683 return attribute->store(chan, buf, count);
1684 }
1685
1686 static const struct sysfs_ops vmbus_chan_sysfs_ops = {
1687 .show = vmbus_chan_attr_show,
1688 .store = vmbus_chan_attr_store,
1689 };
1690
1691 static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
1692 {
1693 struct hv_ring_buffer_info *rbi = &channel->outbound;
1694 ssize_t ret;
1695
1696 mutex_lock(&rbi->ring_buffer_mutex);
1697 if (!rbi->ring_buffer) {
1698 mutex_unlock(&rbi->ring_buffer_mutex);
1699 return -EINVAL;
1700 }
1701
1702 ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1703 mutex_unlock(&rbi->ring_buffer_mutex);
1704 return ret;
1705 }
1706 static VMBUS_CHAN_ATTR_RO(out_mask);
1707
1708 static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
1709 {
1710 struct hv_ring_buffer_info *rbi = &channel->inbound;
1711 ssize_t ret;
1712
1713 mutex_lock(&rbi->ring_buffer_mutex);
1714 if (!rbi->ring_buffer) {
1715 mutex_unlock(&rbi->ring_buffer_mutex);
1716 return -EINVAL;
1717 }
1718
1719 ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1720 mutex_unlock(&rbi->ring_buffer_mutex);
1721 return ret;
1722 }
1723 static VMBUS_CHAN_ATTR_RO(in_mask);
1724
1725 static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
1726 {
1727 struct hv_ring_buffer_info *rbi = &channel->inbound;
1728 ssize_t ret;
1729
1730 mutex_lock(&rbi->ring_buffer_mutex);
1731 if (!rbi->ring_buffer) {
1732 mutex_unlock(&rbi->ring_buffer_mutex);
1733 return -EINVAL;
1734 }
1735
1736 ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
1737 mutex_unlock(&rbi->ring_buffer_mutex);
1738 return ret;
1739 }
1740 static VMBUS_CHAN_ATTR_RO(read_avail);
1741
1742 static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
1743 {
1744 struct hv_ring_buffer_info *rbi = &channel->outbound;
1745 ssize_t ret;
1746
1747 mutex_lock(&rbi->ring_buffer_mutex);
1748 if (!rbi->ring_buffer) {
1749 mutex_unlock(&rbi->ring_buffer_mutex);
1750 return -EINVAL;
1751 }
1752
1753 ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
1754 mutex_unlock(&rbi->ring_buffer_mutex);
1755 return ret;
1756 }
1757 static VMBUS_CHAN_ATTR_RO(write_avail);
1758
1759 static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
1760 {
1761 return sprintf(buf, "%u\n", channel->target_cpu);
1762 }
1763 static ssize_t target_cpu_store(struct vmbus_channel *channel,
1764 const char *buf, size_t count)
1765 {
1766 u32 target_cpu, origin_cpu;
1767 ssize_t ret = count;
1768
1769 if (vmbus_proto_version < VERSION_WIN10_V4_1)
1770 return -EIO;
1771
1772 if (sscanf(buf, "%uu", &target_cpu) != 1)
1773 return -EIO;
1774
1775
1776 if (target_cpu >= nr_cpumask_bits)
1777 return -EINVAL;
1778
1779 if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
1780 return -EINVAL;
1781
1782
1783 cpus_read_lock();
1784
1785 if (!cpu_online(target_cpu)) {
1786 cpus_read_unlock();
1787 return -EINVAL;
1788 }
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
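/*
 * Serialize against channel offer/rescind processing while the channel
 * state is inspected and the target CPU is updated below.
 */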
1812 mutex_lock(&vmbus_connection.channel_mutex);
1813
1814
1815
1816
1817
1818 if (channel->state != CHANNEL_OPENED_STATE) {
1819 ret = -EIO;
1820 goto cpu_store_unlock;
1821 }
1822
1823 origin_cpu = channel->target_cpu;
1824 if (target_cpu == origin_cpu)
1825 goto cpu_store_unlock;
1826
1827 if (vmbus_send_modifychannel(channel,
1828 hv_cpu_number_to_vp_number(target_cpu))) {
1829 ret = -EIO;
1830 goto cpu_store_unlock;
1831 }
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
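/*
 * Update the locally cached target CPU; note that the host may still be
 * processing the MODIFYCHANNEL request asynchronously at this point.
 */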
1848 channel->target_cpu = target_cpu;
1849
1850
1851 if (hv_is_perf_channel(channel))
1852 hv_update_allocated_cpus(origin_cpu, target_cpu);
1853
1854
1855 if (channel->change_target_cpu_callback) {
1856 (*channel->change_target_cpu_callback)(channel,
1857 origin_cpu, target_cpu);
1858 }
1859
1860 cpu_store_unlock:
1861 mutex_unlock(&vmbus_connection.channel_mutex);
1862 cpus_read_unlock();
1863 return ret;
1864 }
1865 static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
1866
1867 static ssize_t channel_pending_show(struct vmbus_channel *channel,
1868 char *buf)
1869 {
1870 return sprintf(buf, "%d\n",
1871 channel_pending(channel,
1872 vmbus_connection.monitor_pages[1]));
1873 }
1874 static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
1875
1876 static ssize_t channel_latency_show(struct vmbus_channel *channel,
1877 char *buf)
1878 {
1879 return sprintf(buf, "%d\n",
1880 channel_latency(channel,
1881 vmbus_connection.monitor_pages[1]));
1882 }
1883 static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
1884
1885 static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
1886 {
1887 return sprintf(buf, "%llu\n", channel->interrupts);
1888 }
1889 static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
1890
1891 static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
1892 {
1893 return sprintf(buf, "%llu\n", channel->sig_events);
1894 }
1895 static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
1896
1897 static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
1898 char *buf)
1899 {
1900 return sprintf(buf, "%llu\n",
1901 (unsigned long long)channel->intr_in_full);
1902 }
1903 static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
1904
1905 static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
1906 char *buf)
1907 {
1908 return sprintf(buf, "%llu\n",
1909 (unsigned long long)channel->intr_out_empty);
1910 }
1911 static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
1912
1913 static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
1914 char *buf)
1915 {
1916 return sprintf(buf, "%llu\n",
1917 (unsigned long long)channel->out_full_first);
1918 }
1919 static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
1920
1921 static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
1922 char *buf)
1923 {
1924 return sprintf(buf, "%llu\n",
1925 (unsigned long long)channel->out_full_total);
1926 }
1927 static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
1928
1929 static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
1930 char *buf)
1931 {
1932 return sprintf(buf, "%u\n", channel->offermsg.monitorid);
1933 }
1934 static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
1935
1936 static ssize_t subchannel_id_show(struct vmbus_channel *channel,
1937 char *buf)
1938 {
1939 return sprintf(buf, "%u\n",
1940 channel->offermsg.offer.sub_channel_index);
1941 }
1942 static VMBUS_CHAN_ATTR_RO(subchannel_id);
1943
1944 static struct attribute *vmbus_chan_attrs[] = {
1945 &chan_attr_out_mask.attr,
1946 &chan_attr_in_mask.attr,
1947 &chan_attr_read_avail.attr,
1948 &chan_attr_write_avail.attr,
1949 &chan_attr_cpu.attr,
1950 &chan_attr_pending.attr,
1951 &chan_attr_latency.attr,
1952 &chan_attr_interrupts.attr,
1953 &chan_attr_events.attr,
1954 &chan_attr_intr_in_full.attr,
1955 &chan_attr_intr_out_empty.attr,
1956 &chan_attr_out_full_first.attr,
1957 &chan_attr_out_full_total.attr,
1958 &chan_attr_monitor_id.attr,
1959 &chan_attr_subchannel_id.attr,
1960 NULL
1961 };
1962
1963
1964
1965
1966
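/*
 * Channel-level attribute visibility callback: hide the monitor-related
 * attributes when the channel does not use the monitor mechanism.
 */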
1967 static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
1968 struct attribute *attr, int idx)
1969 {
1970 const struct vmbus_channel *channel =
1971 container_of(kobj, struct vmbus_channel, kobj);
1972
1973
1974 if (!channel->offermsg.monitor_allocated &&
1975 (attr == &chan_attr_pending.attr ||
1976 attr == &chan_attr_latency.attr ||
1977 attr == &chan_attr_monitor_id.attr))
1978 return 0;
1979
1980 return attr->mode;
1981 }
1982
1983 static struct attribute_group vmbus_chan_group = {
1984 .attrs = vmbus_chan_attrs,
1985 .is_visible = vmbus_chan_attr_is_visible
1986 };
1987
1988 static struct kobj_type vmbus_chan_ktype = {
1989 .sysfs_ops = &vmbus_chan_sysfs_ops,
1990 .release = vmbus_chan_release,
1991 };
1992
1993
1994
1995
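/*
 * vmbus_add_channel_kobj - create the per-channel sysfs directory under
 * the device's "channels" kset and populate its attribute group.
 */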
1996 int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
1997 {
1998 const struct device *device = &dev->device;
1999 struct kobject *kobj = &channel->kobj;
2000 u32 relid = channel->offermsg.child_relid;
2001 int ret;
2002
2003 kobj->kset = dev->channels_kset;
2004 ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
2005 "%u", relid);
2006 if (ret) {
2007 kobject_put(kobj);
2008 return ret;
2009 }
2010
2011 ret = sysfs_create_group(kobj, &vmbus_chan_group);
2012
2013 if (ret) {
2014
2015
2016
2017
2018 kobject_put(kobj);
2019 dev_err(device, "Unable to set up channel sysfs files\n");
2020 return ret;
2021 }
2022
2023 kobject_uevent(kobj, KOBJ_ADD);
2024
2025 return 0;
2026 }
2027
2028
2029
2030
2031 void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
2032 {
2033 sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
2034 }
2035
2036
2037
2038
2039
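/*
 * vmbus_device_create - allocate and initialize a child device object
 * for the given channel offer.
 */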
2040 struct hv_device *vmbus_device_create(const guid_t *type,
2041 const guid_t *instance,
2042 struct vmbus_channel *channel)
2043 {
2044 struct hv_device *child_device_obj;
2045
2046 child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
2047 if (!child_device_obj) {
2048 pr_err("Unable to allocate device object for child device\n");
2049 return NULL;
2050 }
2051
2052 child_device_obj->channel = channel;
2053 guid_copy(&child_device_obj->dev_type, type);
2054 guid_copy(&child_device_obj->dev_instance, instance);
2055 child_device_obj->vendor_id = 0x1414;
2056
2057 return child_device_obj;
2058 }
2059
2060
2061
2062
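/* vmbus_device_register - register the child device with the driver core */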
2063 int vmbus_device_register(struct hv_device *child_device_obj)
2064 {
2065 struct kobject *kobj = &child_device_obj->device.kobj;
2066 int ret;
2067
2068 dev_set_name(&child_device_obj->device, "%pUl",
2069 &child_device_obj->channel->offermsg.offer.if_instance);
2070
2071 child_device_obj->device.bus = &hv_bus;
2072 child_device_obj->device.parent = &hv_acpi_dev->dev;
2073 child_device_obj->device.release = vmbus_device_release;
2074
2075 child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
2076 child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
2077 dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
2078
2079
2080
2081
2082
2083 ret = device_register(&child_device_obj->device);
2084 if (ret) {
2085 pr_err("Unable to register child device\n");
2086 return ret;
2087 }
2088
2089 child_device_obj->channels_kset = kset_create_and_add("channels",
2090 NULL, kobj);
2091 if (!child_device_obj->channels_kset) {
2092 ret = -ENOMEM;
2093 goto err_dev_unregister;
2094 }
2095
2096 ret = vmbus_add_channel_kobj(child_device_obj,
2097 child_device_obj->channel);
2098 if (ret) {
2099 pr_err("Unable to register primary channeln");
2100 goto err_kset_unregister;
2101 }
2102 hv_debug_add_dev_dir(child_device_obj);
2103
2104 return 0;
2105
2106 err_kset_unregister:
2107 kset_unregister(child_device_obj->channels_kset);
2108
2109 err_dev_unregister:
2110 device_unregister(&child_device_obj->device);
2111 return ret;
2112 }
2113
2114
2115
2116
2117
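/*
 * vmbus_device_unregister - remove the child device; this eventually
 * invokes vmbus_remove() and vmbus_device_release().
 */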
2118 void vmbus_device_unregister(struct hv_device *device_obj)
2119 {
2120 pr_debug("child device %s unregistered\n",
2121 dev_name(&device_obj->device));
2122
2123 kset_unregister(device_obj->channels_kset);
2124
2125
2126
2127
2128
2129 device_unregister(&device_obj->device);
2130 }
2131
2132
2133
2134
2135
2136
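/*
 * VMbus is an ACPI-enumerated device; walk its _CRS resources to
 * collect the MMIO windows and, when present, the VMbus interrupt.
 */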
2137 #define VTPM_BASE_ADDRESS 0xfed40000
2138 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
2139 {
2140 resource_size_t start = 0;
2141 resource_size_t end = 0;
2142 struct resource *new_res;
2143 struct resource **old_res = &hyperv_mmio;
2144 struct resource **prev_res = NULL;
2145 struct resource r;
2146
2147 switch (res->type) {
2148
2149
2150
2151
2152
2153
2154 case ACPI_RESOURCE_TYPE_ADDRESS32:
2155 start = res->data.address32.address.minimum;
2156 end = res->data.address32.address.maximum;
2157 break;
2158
2159 case ACPI_RESOURCE_TYPE_ADDRESS64:
2160 start = res->data.address64.address.minimum;
2161 end = res->data.address64.address.maximum;
2162 break;
2163
2164
2165
2166
2167
2168
2169
2170 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
2171 if (!acpi_dev_resource_interrupt(res, 0, &r)) {
2172 pr_err("Unable to parse Hyper-V ACPI interrupt\n");
2173 return AE_ERROR;
2174 }
2175
2176 vmbus_interrupt = res->data.extended_irq.interrupts[0];
2177
2178 vmbus_irq = r.start;
2179 return AE_OK;
2180
2181 default:
2182
2183 return AE_OK;
2184
2185 }
2186
2187
2188
2189
2190 if (end < 0x100000)
2191 return AE_OK;
2192
2193 new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
2194 if (!new_res)
2195 return AE_NO_MEMORY;
2196
2197
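/* If the range overlaps the virtual TPM, truncate it below VTPM_BASE_ADDRESS. */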
2198 if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
2199 end = VTPM_BASE_ADDRESS;
2200
2201 new_res->name = "hyperv mmio";
2202 new_res->flags = IORESOURCE_MEM;
2203 new_res->start = start;
2204 new_res->end = end;
2205
2206
2207
2208
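/*
 * Insert the new range into the sorted hyperv_mmio list, merging it
 * with an adjacent range where possible.
 */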
2209 do {
2210 if (!*old_res) {
2211 *old_res = new_res;
2212 break;
2213 }
2214
2215 if (((*old_res)->end + 1) == new_res->start) {
2216 (*old_res)->end = new_res->end;
2217 kfree(new_res);
2218 break;
2219 }
2220
2221 if ((*old_res)->start == new_res->end + 1) {
2222 (*old_res)->start = new_res->start;
2223 kfree(new_res);
2224 break;
2225 }
2226
2227 if ((*old_res)->start > new_res->end) {
2228 new_res->sibling = *old_res;
2229 if (prev_res)
2230 (*prev_res)->sibling = new_res;
2231 *old_res = new_res;
2232 break;
2233 }
2234
2235 prev_res = old_res;
2236 old_res = &(*old_res)->sibling;
2237
2238 } while (1);
2239
2240 return AE_OK;
2241 }
2242
2243 static int vmbus_acpi_remove(struct acpi_device *device)
2244 {
2245 struct resource *cur_res;
2246 struct resource *next_res;
2247
2248 if (hyperv_mmio) {
2249 if (fb_mmio) {
2250 __release_region(hyperv_mmio, fb_mmio->start,
2251 resource_size(fb_mmio));
2252 fb_mmio = NULL;
2253 }
2254
2255 for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
2256 next_res = cur_res->sibling;
2257 kfree(cur_res);
2258 }
2259 }
2260
2261 return 0;
2262 }
2263
2264 static void vmbus_reserve_fb(void)
2265 {
2266 resource_size_t start = 0, size;
2267 struct pci_dev *pdev;
2268
2269 if (efi_enabled(EFI_BOOT)) {
2270
2271 start = screen_info.lfb_base;
2272 size = max_t(__u32, screen_info.lfb_size, 0x800000);
2273 } else {
2274
2275 pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
2276 PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
2277 if (!pdev)
2278 return;
2279
2280 if (pdev->resource[0].flags & IORESOURCE_MEM) {
2281 start = pci_resource_start(pdev, 0);
2282 size = pci_resource_len(pdev, 0);
2283 }
2284
2285
2286
2287
2288
2289 pci_dev_put(pdev);
2290 }
2291
2292 if (!start)
2293 return;
2294
2295
2296
2297
2298
2299
2300
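/*
 * The reported frame buffer size may be too small, so start with a
 * larger claim and halve it until the reservation succeeds.
 */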
2301 for (; !fb_mmio && (size >= 0x100000); size >>= 1)
2302 fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
2303 }
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
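/*
 * vmbus_allocate_mmio() - pick a free memory-mapped I/O range from the
 * space granted to VMbus, honoring the requested min/max bounds, size
 * and alignment. If @fb_overlap_ok, the reserved frame buffer range may
 * be used. Returns 0 and sets *@new on success, -errno on failure.
 */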
2328 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
2329 resource_size_t min, resource_size_t max,
2330 resource_size_t size, resource_size_t align,
2331 bool fb_overlap_ok)
2332 {
2333 struct resource *iter, *shadow;
2334 resource_size_t range_min, range_max, start, end;
2335 const char *dev_n = dev_name(&device_obj->device);
2336 int retval;
2337
2338 retval = -ENXIO;
2339 mutex_lock(&hyperv_mmio_lock);
2340
2341
2342
2343
2344
2345
2346 if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
2347 !(max < fb_mmio->start)) {
2348
2349 range_min = fb_mmio->start;
2350 range_max = fb_mmio->end;
2351 start = (range_min + align - 1) & ~(align - 1);
2352 for (; start + size - 1 <= range_max; start += align) {
2353 *new = request_mem_region_exclusive(start, size, dev_n);
2354 if (*new) {
2355 retval = 0;
2356 goto exit;
2357 }
2358 }
2359 }
2360
2361 for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2362 if ((iter->start >= max) || (iter->end <= min))
2363 continue;
2364
2365 range_min = iter->start;
2366 range_max = iter->end;
2367 start = (range_min + align - 1) & ~(align - 1);
2368 for (; start + size - 1 <= range_max; start += align) {
2369 end = start + size - 1;
2370
2371
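/* Skip candidates that would touch the framebuffer when overlap is not allowed. */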
2372 if (!fb_overlap_ok && fb_mmio &&
2373 (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
2374 ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
2375 continue;
2376
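/*
 * Mark the candidate busy in the hyperv_mmio tree with a shadow
 * reservation before trying the exclusive claim; the shadow is
 * released again if the exclusive claim fails.
 */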
2377 shadow = __request_region(iter, start, size, NULL,
2378 IORESOURCE_BUSY);
2379 if (!shadow)
2380 continue;
2381
2382 *new = request_mem_region_exclusive(start, size, dev_n);
2383 if (*new) {
2384 shadow->name = (char *)*new;
2385 retval = 0;
2386 goto exit;
2387 }
2388
2389 __release_region(iter, start, size);
2390 }
2391 }
2392
2393 exit:
2394 mutex_unlock(&hyperv_mmio_lock);
2395 return retval;
2396 }
2397 EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
2398
2399
2400
2401
2402
2403
2404
2405
2406
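/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:	Base address of the range to release.
 * @size:	Size of the range to release.
 *
 * Releases a range previously obtained from vmbus_allocate_mmio(),
 * dropping both the shadow reservation in the hyperv_mmio tree and the
 * exclusive memory region.
 */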
2407 void vmbus_free_mmio(resource_size_t start, resource_size_t size)
2408 {
2409 struct resource *iter;
2410
2411 mutex_lock(&hyperv_mmio_lock);
2412 for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2413 if ((iter->start >= start + size) || (iter->end <= start))
2414 continue;
2415
2416 __release_region(iter, start, size);
2417 }
2418 release_mem_region(start, size);
2419 mutex_unlock(&hyperv_mmio_lock);
2421 }
2422 EXPORT_SYMBOL_GPL(vmbus_free_mmio);
2423
2424 static int vmbus_acpi_add(struct acpi_device *device)
2425 {
2426 acpi_status result;
2427 int ret_val = -ENODEV;
2428 struct acpi_device *ancestor;
2429
2430 hv_acpi_dev = device;
2431
2432
2433
2434
2435
2436
2437
2438
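/*
 * Older Hyper-V firmware for ARM64 omits the _CCA method on the
 * top-level VMBus device.  The devices are cache coherent in all
 * current Hyper-V use cases, so mark the ACPI device as if _CCA had
 * been seen and indicated coherent DMA.
 */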
2439 ACPI_COMPANION_SET(&device->dev, device);
2440 if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
2441 device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
2442 pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
2443 device->flags.cca_seen = true;
2444 device->flags.coherent_dma = true;
2445 }
2446
2447 result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
2448 vmbus_walk_resources, NULL);
2449
2450 if (ACPI_FAILURE(result))
2451 goto acpi_walk_err;
2452
2453
2454
2455
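/*
 * The MMIO ranges may be described by an ancestor of the VMBus ACPI
 * device rather than the device itself, so walk up the ACPI tree and
 * parse each ancestor's _CRS until ranges are found.
 */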
2456 for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
2457 result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
2458 vmbus_walk_resources, NULL);
2459
2460 if (ACPI_FAILURE(result))
2461 continue;
2462 if (hyperv_mmio) {
2463 vmbus_reserve_fb();
2464 break;
2465 }
2466 }
2467 ret_val = 0;
2468
2469 acpi_walk_err:
2470 complete(&probe_event);
2471 if (ret_val)
2472 vmbus_acpi_remove(device);
2473 return ret_val;
2474 }
2475
2476 #ifdef CONFIG_PM_SLEEP
2477 static int vmbus_bus_suspend(struct device *dev)
2478 {
2479 struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
2480 hv_context.cpu_context, VMBUS_CONNECT_CPU);
2481 struct vmbus_channel *channel, *sc;
2482
2483 tasklet_disable(&hv_cpu->msg_dpc);
2484 vmbus_connection.ignore_any_offer_msg = true;
2485
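/* tasklet_enable() also provides the memory barrier that makes the flag visible. */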
2486 tasklet_enable(&hv_cpu->msg_dpc);
2487
2488
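/* Flush any offer/rescind work that is still queued before suspending. */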
2489 drain_workqueue(vmbus_connection.rescind_work_queue);
2490 drain_workqueue(vmbus_connection.work_queue);
2491 drain_workqueue(vmbus_connection.handle_primary_chan_wq);
2492 drain_workqueue(vmbus_connection.handle_sub_chan_wq);
2493
2494 mutex_lock(&vmbus_connection.channel_mutex);
2495 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2496 if (!is_hvsock_channel(channel))
2497 continue;
2498
2499 vmbus_force_channel_rescinded(channel);
2500 }
2501 mutex_unlock(&vmbus_connection.channel_mutex);
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
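/*
 * Wait until the channels that must be closed on suspend (sub-channels
 * and hv_sock channels) have gone away.  The counter may stay at zero,
 * e.g. on a 1-vCPU VM with no hv_sock connections, in which case there
 * is nothing to wait for.
 */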
2517 if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
2518 wait_for_completion(&vmbus_connection.ready_for_suspend_event);
2519
2520 if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
2521 pr_err("Cannot suspend due to a previously failed resume\n");
2522 return -EBUSY;
2523 }
2524
2525 mutex_lock(&vmbus_connection.channel_mutex);
2526
2527 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2528
2529
2530
2531
2532
2533
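/*
 * Invalidate the channel's relid and drop it from the relid-to-channel
 * mapping; vmbus_onoffer() re-establishes both when the offer is
 * re-delivered on resume.
 */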
2534 vmbus_channel_unmap_relid(channel);
2535 channel->offermsg.child_relid = INVALID_RELID;
2536
2537 if (is_hvsock_channel(channel)) {
2538 if (!channel->rescind) {
2539 pr_err("hv_sock channel not rescinded!\n");
2540 WARN_ON_ONCE(1);
2541 }
2542 continue;
2543 }
2544
2545 list_for_each_entry(sc, &channel->sc_list, sc_list) {
2546 pr_err("Sub-channel not deleted!\n");
2547 WARN_ON_ONCE(1);
2548 }
2549
2550 atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
2551 }
2552
2553 mutex_unlock(&vmbus_connection.channel_mutex);
2554
2555 vmbus_initiate_unload(false);
2556
2557
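/* Re-arm the completion that vmbus_bus_resume() will wait on. */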
2558 reinit_completion(&vmbus_connection.ready_for_resume_event);
2559
2560 return 0;
2561 }
2562
2563 static int vmbus_bus_resume(struct device *dev)
2564 {
2565 struct vmbus_channel_msginfo *msginfo;
2566 size_t msgsize;
2567 int ret;
2568
2569 vmbus_connection.ignore_any_offer_msg = false;
2570
2571
2572
2573
2574
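/*
 * Re-negotiate with the host using the protocol version that was in
 * use before hibernation; a zero version means the initial negotiation
 * never happened, so resuming cannot work.
 */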
2575 if (!vmbus_proto_version) {
2576 pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
2577 return -EINVAL;
2578 }
2579
2580 msgsize = sizeof(*msginfo) +
2581 sizeof(struct vmbus_channel_initiate_contact);
2582
2583 msginfo = kzalloc(msgsize, GFP_KERNEL);
2584
2585 if (msginfo == NULL)
2586 return -ENOMEM;
2587
2588 ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
2589
2590 kfree(msginfo);
2591
2592 if (ret != 0)
2593 return ret;
2594
2595 WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
2596
2597 vmbus_request_offers();
2598
2599 if (wait_for_completion_timeout(
2600 &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
2601 pr_err("Some VMBus device is missing after resume\n");
2602
2603
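/* Re-arm the completion that the next vmbus_bus_suspend() will wait on. */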
2604 reinit_completion(&vmbus_connection.ready_for_suspend_event);
2605
2606 return 0;
2607 }
2608 #else
2609 #define vmbus_bus_suspend NULL
2610 #define vmbus_bus_resume NULL
2611 #endif
2612
2613 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
2614 {"VMBUS", 0},
2615 {"VMBus", 0},
2616 {"", 0},
2617 };
2618 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
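/*
 * Hibernation (freeze/thaw/poweroff/restore) is routed through the
 * "noirq" phases so that vmbus_bus_suspend()/vmbus_bus_resume() run in
 * the same phase as the PCI noirq callbacks used by pci-hyperv.  Plain
 * suspend/resume (suspend-to-idle) intentionally does nothing here,
 * hence the NULL suspend_noirq/resume_noirq entries.
 */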
2632 static const struct dev_pm_ops vmbus_bus_pm = {
2633 .suspend_noirq = NULL,
2634 .resume_noirq = NULL,
2635 .freeze_noirq = vmbus_bus_suspend,
2636 .thaw_noirq = vmbus_bus_resume,
2637 .poweroff_noirq = vmbus_bus_suspend,
2638 .restore_noirq = vmbus_bus_resume
2639 };
2640
2641 static struct acpi_driver vmbus_acpi_driver = {
2642 .name = "vmbus",
2643 .ids = vmbus_acpi_device_ids,
2644 .ops = {
2645 .add = vmbus_acpi_add,
2646 .remove = vmbus_acpi_remove,
2647 },
2648 .drv.pm = &vmbus_bus_pm,
2649 };
2650
2651 static void hv_kexec_handler(void)
2652 {
2653 hv_stimer_global_cleanup();
2654 vmbus_initiate_unload(false);
2655
2656 mb();
2657 cpuhp_remove_state(hyperv_cpuhp_online);
2658 }
2659
2660 static void hv_crash_handler(struct pt_regs *regs)
2661 {
2662 int cpu;
2663
2664 vmbus_initiate_unload(true);
2665
2666
2667
2668
2669
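/*
 * A crash handler cannot schedule cleanup on other CPUs, so only clean
 * up the stimer and synic registers of the CPU we are crashing on;
 * that is sufficient for kdump.
 */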
2670 cpu = smp_processor_id();
2671 hv_stimer_cleanup(cpu);
2672 hv_synic_disable_regs(cpu);
2673 }
2674
2675 static int hv_synic_suspend(void)
2676 {
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
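/*
 * syscore suspend runs on CPU0 only, after all other CPUs have been
 * offlined and with interrupts disabled, so disabling the synic
 * registers of CPU0 is all that is needed here.
 */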
2697 hv_synic_disable_regs(0);
2698
2699 return 0;
2700 }
2701
2702 static void hv_synic_resume(void)
2703 {
2704 hv_synic_enable_regs(0);
2705
2706
2707
2708
2709
2710
2711 }
2712
2713
2714 static struct syscore_ops hv_synic_syscore_ops = {
2715 .suspend = hv_synic_suspend,
2716 .resume = hv_synic_resume,
2717 };
2718
2719 static int __init hv_acpi_init(void)
2720 {
2721 int ret, t;
2722
2723 if (!hv_is_hyperv_initialized())
2724 return -ENODEV;
2725
2726 if (hv_root_partition)
2727 return 0;
2728
2729 init_completion(&probe_event);
2730
2731
2732
2733
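/*
 * Register the ACPI driver first; vmbus_acpi_add() parses the MMIO
 * resources and signals probe_event, which is waited on below before
 * the bus itself is initialized.
 */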
2734 ret = acpi_bus_register_driver(&vmbus_acpi_driver);
2735
2736 if (ret)
2737 return ret;
2738
2739 t = wait_for_completion_timeout(&probe_event, 5*HZ);
2740 if (t == 0) {
2741 ret = -ETIMEDOUT;
2742 goto cleanup;
2743 }
2744
2745
2746
2747
2748
2749
2750
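/*
 * On architectures with a hardcoded hypervisor callback vector (x86),
 * use that vector instead of an interrupt from the ACPI tables and
 * leave vmbus_irq at -1 so the normal Linux IRQ machinery is bypassed.
 */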
2751 #ifdef HYPERVISOR_CALLBACK_VECTOR
2752 vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
2753 vmbus_irq = -1;
2754 #endif
2755
2756 hv_debug_init();
2757
2758 ret = vmbus_bus_init();
2759 if (ret)
2760 goto cleanup;
2761
2762 hv_setup_kexec_handler(hv_kexec_handler);
2763 hv_setup_crash_handler(hv_crash_handler);
2764
2765 register_syscore_ops(&hv_synic_syscore_ops);
2766
2767 return 0;
2768
2769 cleanup:
2770 acpi_bus_unregister_driver(&vmbus_acpi_driver);
2771 hv_acpi_dev = NULL;
2772 return ret;
2773 }
2774
2775 static void __exit vmbus_exit(void)
2776 {
2777 int cpu;
2778
2779 unregister_syscore_ops(&hv_synic_syscore_ops);
2780
2781 hv_remove_kexec_handler();
2782 hv_remove_crash_handler();
2783 vmbus_connection.conn_state = DISCONNECTED;
2784 hv_stimer_global_cleanup();
2785 vmbus_disconnect();
2786 if (vmbus_irq == -1) {
2787 hv_remove_vmbus_handler();
2788 } else {
2789 free_percpu_irq(vmbus_irq, vmbus_evt);
2790 free_percpu(vmbus_evt);
2791 }
2792 for_each_online_cpu(cpu) {
2793 struct hv_per_cpu_context *hv_cpu
2794 = per_cpu_ptr(hv_context.cpu_context, cpu);
2795
2796 tasklet_kill(&hv_cpu->msg_dpc);
2797 }
2798 hv_debug_rm_all_dir();
2799
2800 vmbus_free_channels();
2801 kfree(vmbus_connection.channels);
2802
2803 if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
2804 kmsg_dump_unregister(&hv_kmsg_dumper);
2805 unregister_die_notifier(&hyperv_die_block);
2806 }
2807
2808
2809
2810
2811
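/*
 * The panic notifier is registered unconditionally, so unregister it
 * unconditionally here as well.
 */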
2812 atomic_notifier_chain_unregister(&panic_notifier_list,
2813 &hyperv_panic_block);
2814
2815 free_page((unsigned long)hv_panic_page);
2816 unregister_sysctl_table(hv_ctl_table_hdr);
2817 hv_ctl_table_hdr = NULL;
2818 bus_unregister(&hv_bus);
2819
2820 cpuhp_remove_state(hyperv_cpuhp_online);
2821 hv_synic_free();
2822 acpi_bus_unregister_driver(&vmbus_acpi_driver);
2823 }
2824
2825
2826 MODULE_LICENSE("GPL");
2827 MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
2828
2829 subsys_initcall(hv_acpi_init);
2830 module_exit(vmbus_exit);