/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;

/*
 * Handling of free grants:
 *
 * Free grants are in a simple list anchored in gnttab_free_head. They are
 * linked by grant ref, the last element contains GNTTAB_LIST_END. The number
 * of free entries is stored in gnttab_free_count.
 * Additionally there is a bitmap of free entries anchored in
 * gnttab_free_bitmap. This is being used for simplifying allocation of
 * multiple consecutive grants, which is needed e.g. for support of virtio.
 * gnttab_last_free is used to add free entries of new frames at the end of
 * the free list.
 * gnttab_free_tail_ptr specifies the variable which references the start
 * of consecutive free grants ending with gnttab_last_free. This pointer is
 * updated in a rather defensive way, in order to avoid performance hits in
 * hot paths.
 * All those variables are protected by gnttab_list_lock.
 */
static int gnttab_free_count;
static unsigned int gnttab_size;
static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
static grant_ref_t *gnttab_free_tail_ptr;
static unsigned long *gnttab_free_bitmap;
static DEFINE_SPINLOCK(gnttab_list_lock);

struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);
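
/*
 * The layout version can be forced at boot time, e.g. with
 * "grant_table.version=1" on the kernel command line (the parameter name
 * assumes KBUILD_MODNAME is "grant_table" for this file); only values 1
 * and 2 are honoured, see gnttab_request_version().
 */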

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;


/* Function-pointer table abstracting the v1/v2 grant table layouts. */
struct gnttab_ops {
	/*
	 * Version of the grant interface (1 or 2).
	 */
	unsigned int version;
	/*
	 * Number of grant references per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map the list of @nr_gframes frames making up the grant table.
	 * @frames holds the frame numbers obtained from the hypervisor.
	 * Returns 0 on success, a negative value on failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Unmap the frames mapped by map_frames().
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table: grant access to
	 * @frame for domain @domid via entry @ref, with the access mode
	 * given by @flags.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting access through entry @ref. If the entry is still
	 * mapped for reading or writing, return 0 without tearing the
	 * grant down; otherwise revoke the grant and return 1.
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref);
	/*
	 * Read the frame number stored in grant entry @ref.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* Mapped status frames; only used with the v2 interface. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

/* Grant references per free-list page, and status entries per page. */
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}

#define gnttab_entry(entry) (*__gnttab_entry(entry))
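
/*
 * Illustration: with 4 KiB pages and 4-byte grant_ref_t entries RPP is
 * 1024, so free-list entry 2500 lives at gnttab_list[2][452]
 * (2500 / 1024 == 2, 2500 % 1024 == 452).
 */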

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count--) {
		bitmap_clear(gnttab_free_bitmap, head, 1);
		if (gnttab_free_tail_ptr == __gnttab_entry(head))
			gnttab_free_tail_ptr = &gnttab_free_head;
		if (count)
			head = gnttab_entry(head);
	}
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	if (!gnttab_free_count) {
		gnttab_last_free = GNTTAB_LIST_END;
		gnttab_free_tail_ptr = NULL;
	}

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static int get_seq_entry_count(void)
{
	if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
	    *gnttab_free_tail_ptr == GNTTAB_LIST_END)
		return 0;

	return gnttab_last_free - *gnttab_free_tail_ptr + 1;
}

/* Rebuilds the free grant list and tries to find count consecutive entries. */
static int get_free_seq(unsigned int count)
{
	int ret = -ENOSPC;
	unsigned int from, to;
	grant_ref_t *last;

	gnttab_free_tail_ptr = &gnttab_free_head;
	last = &gnttab_free_head;

	for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
	     from < gnttab_size;
	     from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
		to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
					from + 1);
		if (ret < 0 && to - from >= count) {
			ret = from;
			bitmap_clear(gnttab_free_bitmap, ret, count);
			from += count;
			gnttab_free_count -= count;
			if (from == to)
				continue;
		}

		/*
		 * Recreate the free list in order to have it properly
		 * sorted. This is needed to make sure that the free tail
		 * has the maximum possible size.
		 */
		while (from < to) {
			*last = from;
			last = __gnttab_entry(from);
			gnttab_last_free = from;
			from++;
		}
		if (to < gnttab_size)
			gnttab_free_tail_ptr = __gnttab_entry(to - 1);
	}

	*last = GNTTAB_LIST_END;
	if (gnttab_last_free != gnttab_size - 1)
		gnttab_free_tail_ptr = NULL;

	return ret;
}

static int get_free_entries_seq(unsigned int count)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if (gnttab_free_count < count) {
		ret = gnttab_expand(count - gnttab_free_count);
		if (ret < 0)
			goto out;
	}

	if (get_seq_entry_count() < count) {
		ret = get_free_seq(count);
		if (ret >= 0)
			goto out;
		ret = gnttab_expand(count - get_seq_entry_count());
		if (ret < 0)
			goto out;
	}

	ret = *gnttab_free_tail_ptr;
	*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
	gnttab_free_count -= count;
	if (!gnttab_free_count)
		gnttab_free_tail_ptr = NULL;
	bitmap_clear(gnttab_free_bitmap, ret, count);

out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ret;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry_locked(grant_ref_t ref)
{
	if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
		return;

	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	if (!gnttab_free_count)
		gnttab_last_free = ref;
	if (gnttab_free_tail_ptr == &gnttab_free_head)
		gnttab_free_tail_ptr = __gnttab_entry(ref);
	gnttab_free_count++;
	bitmap_set(gnttab_free_bitmap, ref, 1);
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	put_free_entry_locked(ref);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_set_free(unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = start; i < start + n - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = GNTTAB_LIST_END;
	if (!gnttab_free_count) {
		gnttab_free_head = start;
		gnttab_free_tail_ptr = &gnttab_free_head;
	} else {
		gnttab_entry(gnttab_last_free) = start;
	}
	gnttab_free_count += n;
	gnttab_last_free = i;

	bitmap_set(gnttab_free_bitmap, start, n);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame: the frame to which access is permitted.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
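
/*
 * Typical frontend usage (illustrative sketch; "page" and "backend_domid"
 * are hypothetical caller state):
 *
 *	int ref = gnttab_grant_foreign_access(backend_domid,
 *					      xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		return ref;
 *	... pass "ref" to the backend and wait for it to finish ...
 *	gnttab_end_foreign_access(ref, page);
 */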

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_access_ref(ref);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	if (_gnttab_end_foreign_access_ref(ref))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

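/*
 * A grant that could not be revoked because the remote domain still has it
 * mapped is parked here and retried from a timer (see
 * gnttab_handle_deferred() below, roughly once a second), so the grant
 * entry and its backing page are released only once they are finally free.
 */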
struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref)) {
			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
				 entry->ref, page_to_pfn(entry->page));
			put_page(entry->page);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	const char *what = KERN_WARNING "leaking";

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page)
			put_page(page);
	} else
		gnttab_add_deferred(ref, page);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (head != GNTTAB_LIST_END) {
		ref = gnttab_entry(head);
		put_free_entry_locked(head);
		head = ref;
	}
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (i = count; i > 0; i--)
		put_free_entry_locked(head + i - 1);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
{
	int h;

	if (count == 1)
		h = get_free_entries(1);
	else
		h = get_free_entries_seq(count);

	if (h < 0)
		return -ENOSPC;

	*first = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
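
/*
 * Illustrative use of the reservation API (hypothetical caller code):
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(16, &head))
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);   // take one from the pool
 *	...
 *	gnttab_release_grant_reference(&head, ref);  // put it back
 *	gnttab_free_grant_references(head);          // drop the whole pool
 */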

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list. */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}
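
/*
 * Example (v1 layout, 4 KiB Xen pages): grefs_per_grant_frame == 512 and
 * RPP == 1024, so gnttab_frames(3, RPP) == (3 * 512 + 1023) / 1024 == 2
 * free-list pages for three grant frames.
 */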

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	gnttab_set_free(gnttab_size, extra_entries);

	if (!gnttab_free_tail_ptr)
		gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);

	nr_grant_frames = new_nr_grant_frames;
	gnttab_size += extra_entries;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long)gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		memunmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	memunmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
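
/*
 * Illustrative usage (hypothetical "cache" owned by a backend device):
 *
 *	struct gnttab_page_cache cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&cache);
 *	if (gnttab_page_cache_get(&cache, &page))
 *		return -ENOMEM;
 *	...
 *	gnttab_page_cache_put(&cache, &page, 1);
 *	gnttab_page_cache_shrink(&cache, 16);	// keep at most 16 spare pages
 */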

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
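
/*
 * Sketch of a single copy op (illustrative; "ref", "otherend_id", "gfn" and
 * "len" are hypothetical caller state). gnttab_batch_copy() retries
 * GNTST_eagain in place, so only the final status needs checking:
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref = ref,
 *		.source.domid = otherend_id,
 *		.dest.u.gmfn = gfn,
 *		.dest.domid = DOMID_SELF,
 *		.len = len,
 *		.flags = GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		// handle failure
 */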

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
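
/*
 * Note: grants are always XEN_PAGE_SIZE (4 KiB) granular. On kernels where
 * PAGE_SIZE is larger (e.g. 64 KiB pages on arm64) one struct page spans
 * several grants, which is why this helper and gnttab_foreach_grant() below
 * invoke @fn once per XEN_PAGE_SIZE chunk.
 */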

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
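
/*
 * Note: gnttab_unmap_refs_sync() is just the async path above plus a
 * completion, so it may sleep; callers in atomic context should use
 * gnttab_unmap_refs_async() with their own done() callback instead.
 */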

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.read_frame			= gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.read_frame			= gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Don't blindly trust the module parameter; accept only 1 or 2. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames is not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames, max_nr_grefs;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	max_nr_grefs = max_nr_grant_frames *
		       gnttab_interface->grefs_per_grant_frame;
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = max_nr_grefs / RPP;

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
	if (!gnttab_free_bitmap) {
		ret = -ENOMEM;
		goto ini_nomem;
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;

	gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
			gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	bitmap_free(gnttab_free_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall and before subsys_initcall. */
core_initcall_sync(__gnttab_init);