/*
 * Client-facing interface for the Xenbus driver: helpers shared by
 * frontend and backend device drivers for watch registration, state
 * switching, error reporting, shared-ring setup and mapping, and event
 * channel allocation.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

/* A ring mapped from another domain, tracked on xenbus_valloc_pages. */
struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

/* Scratch state used while mapping a ring via xenbus_map_ring_valloc(). */
struct map_ring_valloc {
	struct xenbus_map_node *node;

	/*
	 * Two address arrays are needed because the PV and HVM paths hand
	 * different addresses to the grant map/unmap operations.
	 */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[XenbusStateUnknown]       = "Unknown",
		[XenbusStateInitialising]  = "Initialising",
		[XenbusStateInitWait]      = "InitWait",
		[XenbusStateInitialised]   = "Initialised",
		[XenbusStateConnected]     = "Connected",
		[XenbusStateClosing]       = "Closing",
		[XenbusStateClosed]        = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured]  = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @will_handle: optional callback deciding whether an event should be queued
 * @callback: callback to register
 *
 * Register a watch on the given path, using the given xenbus_watch
 * structure for storage and the given @callback function as the callback.
 * On success the given @path is saved as watch->node and remains the
 * caller's to free.  On error watch->node is NULL, the device switches to
 * XenbusStateClosing, and the error is returned.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      bool (*will_handle)(struct xenbus_watch *,
					  const char *, const char *),
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->will_handle = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
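
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * registers a watch on a node under the other end's xenstore directory,
 * here via the xenbus_watch_pathfmt() wrapper defined below.  The node name
 * "hotplug-status" and the my_node_changed()/my_register_watch() helpers are
 * hypothetical and only stand in for driver-specific names.
 *
 *	static void my_node_changed(struct xenbus_watch *watch,
 *				    const char *path, const char *token)
 *	{
 *		// react to the watched node changing
 *	}
 *
 *	static int my_register_watch(struct xenbus_device *dev,
 *				     struct xenbus_watch *watch)
 *	{
 *		// Passing NULL for will_handle queues every event.
 *		return xenbus_watch_pathfmt(dev, watch, NULL, my_node_changed,
 *					    "%s/%s", dev->otherend,
 *					    "hotplug-status");
 *	}
 */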

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @will_handle: optional callback deciding whether an event should be queued
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt.  On success the
 * allocated path is saved as watch->node and becomes the caller's to
 * kfree() once the watch is unregistered.  On error watch->node is NULL,
 * the path has already been freed, the device switches to
 * XenbusStateClosing, and the error is returned.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 bool (*will_handle)(struct xenbus_watch *,
					     const char *, const char *),
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, will_handle, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/*
	 * Only write the state if it actually changes; an unconditional
	 * write would fire watches unnecessarily.  The write is done in a
	 * transaction so the current state node can be re-read first: if
	 * it cannot be read (e.g. the node has gone because the device is
	 * being torn down) the transaction is aborted and nothing is
	 * written.  @depth guards against unbounded recursion through
	 * xenbus_switch_fatal().
	 */
	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state - advertise a driver state change in the store
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new
 * state.  On error the error is reported via the store and the device
 * switches to XenbusStateClosing.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
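
/*
 * Illustrative sketch (not part of the original file): frontends usually
 * drive their side of the connection state machine from the
 * otherend_changed handler of their xenbus_driver.  The my_connect() helper
 * below is hypothetical, and real drivers handle more states than shown.
 *
 *	static void my_backend_changed(struct xenbus_device *dev,
 *				       enum xenbus_state backend_state)
 *	{
 *		switch (backend_state) {
 *		case XenbusStateInitWait:
 *			if (my_connect(dev) == 0)
 *				xenbus_switch_state(dev, XenbusStateConnected);
 *			break;
 *		case XenbusStateClosing:
 *		case XenbusStateClosed:
 *			xenbus_frontend_closed(dev);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */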

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal - place an error message into the store and shut down
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
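
/*
 * Illustrative sketch (not part of the original file): the usual calling
 * pattern is to report the failing xenstore operation and bail out, letting
 * xenbus_dev_fatal() move the device towards XenbusStateClosing.  The
 * "ring-ref" node and the ring_ref variable are hypothetical here.
 *
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", ring_ref);
 *	if (err) {
 *		xenbus_dev_fatal(dev, err, "writing ring-ref");
 *		return err;
 *	}
 */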

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps avoid
 * recursion within __xenbus_switch_state() by only switching the state at
 * recursion depth 0.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_setup_ring - allocate and grant pages for a shared ring
 * @dev: xenbus device
 * @gfp: memory allocation flags
 * @vaddr: pointer to starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Allocate physically contiguous pages for a shared ring buffer and grant
 * them to the peer of the given device.  The ring is zero-filled.  The
 * virtual address of the ring is stored at @vaddr and the grant references
 * are stored in the @grefs array.  On error @vaddr is set to NULL and all
 * @grefs entries are set to INVALID_GRANT_REF.
 */
int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
	grant_ref_t gref_head;
	unsigned int i;
	void *addr;
	int ret;

	addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
	if (!*vaddr) {
		ret = -ENOMEM;
		goto err;
	}

	ret = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "granting access to %u ring pages",
				 nr_pages);
		goto err;
	}

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn;

		if (is_vmalloc_addr(*vaddr))
			gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
		else
			gfn = virt_to_gfn(addr);

		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						gfn, 0);

		addr += XEN_PAGE_SIZE;
	}

	return 0;

err:
	if (*vaddr)
		free_pages_exact(*vaddr, ring_size);
	for (i = 0; i < nr_pages; i++)
		grefs[i] = INVALID_GRANT_REF;
	*vaddr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_setup_ring);
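
/*
 * Illustrative sketch (not part of the original file): a frontend allocating
 * a single-page shared ring, publishing its grant reference, and tearing the
 * ring down again on error.  The "ring-ref" node name is a common convention
 * but is chosen by the individual driver.
 *
 *	void *ring;
 *	grant_ref_t gref;
 *	int err;
 *
 *	err = xenbus_setup_ring(dev, GFP_KERNEL, &ring, 1, &gref);
 *	if (err)
 *		return err;
 *
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", gref);
 *	if (err) {
 *		xenbus_teardown_ring(&ring, 1, &gref);
 *		return err;
 *	}
 */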

/**
 * xenbus_teardown_ring - free a shared ring set up with xenbus_setup_ring()
 * @vaddr: pointer to the ring's starting virtual address
 * @nr_pages: number of pages the ring occupies
 * @grefs: grant references to revoke
 *
 * End foreign access for all valid grant references, free the ring pages,
 * and reset @vaddr to NULL and the @grefs entries to INVALID_GRANT_REF.
 */
void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
			  grant_ref_t *grefs)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (grefs[i] != INVALID_GRANT_REF) {
			gnttab_end_foreign_access(grefs[i], NULL);
			grefs[i] = INVALID_GRANT_REF;
		}
	}

	if (*vaddr)
		free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);
	*vaddr = NULL;
}
EXPORT_SYMBOL_GPL(xenbus_teardown_ring);

/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
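
/*
 * Illustrative sketch (not part of the original file): allocating an unbound
 * event channel for the other end and advertising it in xenstore.  Binding
 * the port to an interrupt handler (e.g. with bind_evtchn_to_irqhandler())
 * is left to the driver.  The "event-channel" node name is the common
 * convention.
 *
 *	evtchn_port_t evtchn;
 *	int err;
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel", "%u",
 *			    evtchn);
 *	if (err) {
 *		xenbus_free_evtchn(dev, evtchn);
 *		return err;
 *	}
 */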

/*
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %u", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc - allocate and map pages of VA space
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another domain's
 * grant table.  Allocates @nr_grefs pages of virtual address space, maps
 * the pages to that address, and sets *vaddr to that address.  Returns 0 on
 * success, and -errno on error.  If an error is returned, the device will
 * switch to XenbusStateClosing and the error message will be saved in
 * XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;
	struct map_ring_valloc *info;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		err = -ENOMEM;
	else
		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	kfree(info->node);
	kfree(info);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
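
/*
 * Illustrative sketch (not part of the original file): a backend reading the
 * frontend's grant reference from the other end's directory and mapping the
 * shared page into its own address space; xenbus_unmap_ring_vfree() undoes
 * the mapping.  The "ring-ref" node name is hypothetical.
 *
 *	grant_ref_t ring_ref;
 *	void *ring;
 *	int err;
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
 *	if (err != 1)
 *		return err < 0 ? err : -EINVAL;
 *
 *	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &ring);
 *	if (err)
 *		return err;
 *
 *	// ... use the ring, then on disconnect:
 *	xenbus_unmap_ring_vfree(dev, ring);
 */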

/*
 * Map @nr_grefs grant references, storing the resulting handles in
 * @handles.  The caller must have filled info->phys_addrs with the
 * addresses to map at (machine addresses of PTEs for PV, physical addresses
 * for HVM).  On failure any partial mappings are undone; *leaked is set if
 * that cleanup itself failed.
 */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     struct map_ring_valloc *info,
			     unsigned int flags,
			     bool *leaked)
{
	int i, j;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

	return 0;

fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j));

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (info->unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return -ENOENT;
}

/*
 * Unmap memory in this domain that was imported from another domain, given
 * the grant handles and the virtual addresses the pages are mapped at.
 * Returns 0 on success and a GNTST_* status on error (see
 * xen/include/interface/grant_table.h).
 */
static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
			     unsigned int nr_handles, unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_hvm(struct xenbus_device *dev,
			       struct map_ring_valloc *info,
			       grant_ref_t *gnt_ref,
			       unsigned int nr_grefs,
			       void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	int err;
	void *addr;
	bool leaked = false;
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	info->node = NULL;

	return 0;

out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
out_free_ballooned_pages:
	if (!leaked)
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
out_err:
	return err;
}

/**
 * xenbus_unmap_ring_vfree - unmap memory mapped with xenbus_map_ring_valloc()
 * @dev: xenbus device
 * @vaddr: address to unmap, as returned by xenbus_map_ring_valloc()
 *
 * Unmap memory in this domain that was imported from another domain and
 * free the virtual address space it occupied.  Returns 0 on success and a
 * GNTST_* status on error (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
{
	struct map_ring_valloc *info = data;

	info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
	return 0;
}

static int xenbus_map_ring_pv(struct xenbus_device *dev,
			      struct map_ring_valloc *info,
			      grant_ref_t *gnt_refs,
			      unsigned int nr_grefs,
			      void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	struct vm_struct *area;
	bool leaked = false;
	int err = -ENOMEM;

	area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
	if (!area)
		return -ENOMEM;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
		goto failed;
	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				info, GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	info->node = NULL;

	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	return err;
}

static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
#endif

struct unmap_ring_hvm {
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_read_driver_state - read the state of a xenbus device
 * @path: xenstore path of the device's directory
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);

	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
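
/*
 * Illustrative sketch (not part of the original file): a frontend checking
 * its peer's state before issuing requests, by reading the "state" node in
 * the other end's directory.
 *
 *	if (xenbus_read_driver_state(dev->otherend) != XenbusStateConnected)
 *		return -ENODEV;
 */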

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}