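// SPDX-License-Identifier: GPL-2.0
/*
 *  KVM guest address space mapping code
 *
 *    Copyright IBM Corp. 2007, 2020
 */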
#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL
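/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */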
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	refcount_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
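/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
/*
 * Illustrative usage sketch only (KVM is the real caller; "from" and "to"
 * are made-up names here and error handling is omitted):
 *
 *	struct gmap *g = gmap_create(current->mm, (1UL << 44) - 1);
 *
 *	if (g) {
 *		gmap_map_segment(g, from, to, PMD_SIZE);
 *		...
 *		gmap_remove(g);
 *	}
 */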
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
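/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */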
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	if (gmap_is_shadow(gmap)) {
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
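/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer.
 */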
struct gmap *gmap_get(struct gmap *gmap)
{
	refcount_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
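/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */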
void gmap_put(struct gmap *gmap)
{
	if (refcount_dec_and_test(&gmap->ref_count))
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
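/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */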
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}

	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();

	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
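/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */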
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
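/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */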
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
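/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */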
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
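/*
 * gmap_alloc_table is assumed to be called with mmap_lock held
 */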
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}
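/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */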
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
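/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */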
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
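/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */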
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
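/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */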
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	mmap_write_lock(gmap->mm);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	mmap_write_unlock(gmap->mm);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
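/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */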
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	mmap_write_lock(gmap->mm);
	for (off = 0; off < len; off += PMD_SIZE) {
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	mmap_write_unlock(gmap->mm);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
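/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */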
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
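/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */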
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	mmap_read_lock(gmap->mm);
	rc = __gmap_translate(gmap, gaddr);
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
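/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */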
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);
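/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 */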
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));

	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;

	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));

	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));

	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;

	rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_large(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					 | _SEGMENT_ENTRY_GMAP_UC;
			} else
				*table = pmd_val(*pmd) &
					 _SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
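/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */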
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	mmap_read_lock(gmap->mm);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
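	/*
	 * In the case that fixup_user_fault unlocked the mmap_lock during
	 * fault-in, redo __gmap_translate to not race with a
	 * map/unmap_segment.
	 */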
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
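/**
 * __gmap_zap - zap the pte backing a single guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * This function is assumed to be called with mmap_lock held.
 */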
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;

		vma = vma_lookup(gmap->mm, vmaddr);
		if (!vma || is_vm_hugetlb_page(vma))
			return;

		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	mmap_read_lock(gmap->mm);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;

		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;

		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	mmap_read_unlock(gmap->mm);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);
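/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
/*
 * Illustrative sketch only (callers such as KVM provide the real notifier;
 * my_notifier_call and my_nb are made-up names):
 *
 *	static void my_notifier_call(struct gmap *gmap, unsigned long start,
 *				     unsigned long end) { ... }
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 */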
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
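/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */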
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
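/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */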
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
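/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */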
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
	unsigned long *table = gmap->table;

	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;

	if (WARN_ON_ONCE(level > (asce_type >> 2) + 1))
		return NULL;

	if (asce_type != _ASCE_TYPE_REGION1 &&
	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
		return NULL;

	switch (asce_type) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
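/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */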
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	table = gmap_table_walk(gmap, gaddr, 1);
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
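/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */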
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		return 0;

	return __gmap_link(gmap, gaddr, vmaddr);
}
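/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */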
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}
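/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */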
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
	if (!pmdp)
		return NULL;

	if (!gmap->mm->context.allow_gmap_hpage_1m)
		return pmd_none(*pmdp) ? NULL : pmdp;

	spin_lock(&gmap->guest_table_lock);
	if (pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	if (!pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}
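/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */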
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}
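/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_lock in read mode and called with
 * guest_table_lock held.
 */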
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_PROTECT));
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));

	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;

	return 0;
}
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl = NULL;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;

	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptl);
	return rc;
}
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_large(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	mmap_read_lock(gmap->mm);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
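				/* Do *NOT* clear the _PAGE_INVALID bit! */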
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	struct gmap_rmap *temp;
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		for (temp = rmap->next; temp; temp = temp->next) {
			if (temp->raddr == rmap->raddr) {
				kfree(rmap);
				return;
			}
		}
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
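/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */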
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	idte	%0,0,%1"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0);
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1);
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2);
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3);
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4);
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		refcount_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;

	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	refcount_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);

	mmap_read_lock(parent->mm);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, GMAP_NOTIFY_SHADOW);
	mmap_read_unlock(parent->mm);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4);
	if (!table) {
		rc = -EAGAIN;
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r2t)
			rc = -EAGAIN;
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3);
	if (!table) {
		rc = -EAGAIN;
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r3t)
			rc = -EAGAIN;
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2);
	if (!table) {
		rc = -EAGAIN;
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_sgt)
			rc = -EAGAIN;
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1);
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1);
	if (!table) {
		rc = -EAGAIN;
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;
		goto out_free;
	}
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
		    (unsigned long) s_pgt)
			rc = -EAGAIN;
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;

		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);

static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
			     unsigned long gaddr)
{
	set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
}
static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
			   unsigned long gaddr)
{
	gaddr &= HPAGE_MASK;
	pmdp_notify_gmap(gmap, pmdp, gaddr);
	new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN));
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
			    IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	set_pmd(pmdp, new);
}

static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
			    int purge)
{
	pmd_t *pmdp;
	struct gmap *gmap;
	unsigned long gaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
						  vmaddr >> PMD_SHIFT);
		if (pmdp) {
			gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
						   _SEGMENT_ENTRY_GMAP_UC));
			if (purge)
				__pmdp_csp(pmdp);
			set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
{
	gmap_pmdp_clear(mm, vmaddr, 0);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
{
	gmap_pmdp_clear(mm, vmaddr, 1);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *entry, gaddr;
	struct gmap *gmap;
	pmd_t *pmdp;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		entry = radix_tree_delete(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (entry) {
			pmdp = (pmd_t *)entry;
			gaddr = __gmap_segment_gaddr(entry);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC));
			if (MACHINE_HAS_TLB_GUEST)
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_LOCAL);
			else if (MACHINE_HAS_IDTE)
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
			*entry = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *entry, gaddr;
	struct gmap *gmap;
	pmd_t *pmdp;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		entry = radix_tree_delete(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (entry) {
			pmdp = (pmd_t *)entry;
			gaddr = __gmap_segment_gaddr(entry);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC));
			if (MACHINE_HAS_TLB_GUEST)
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_GLOBAL);
			else if (MACHINE_HAS_IDTE)
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
			else
				__pmdp_csp(pmdp);
			*entry = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
					  unsigned long gaddr)
{
	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return false;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
	    !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
		return false;

	set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC)));
	gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
	return true;
}
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr)
{
	int i;
	pmd_t *pmdp;
	pte_t *ptep;
	spinlock_t *ptl;

	pmdp = gmap_pmd_op_walk(gmap, gaddr);
	if (!pmdp)
		return;

	if (pmd_large(*pmdp)) {
		if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
			bitmap_fill(bitmap, _PAGE_ENTRIES);
	} else {
		for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
			ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
			if (!ptep)
				continue;
			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
				set_bit(i, bitmap);
			spin_unlock(ptl);
		}
	}
	gmap_pmd_op_end(gmap, pmdp);
}
EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
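
/*
 * Example: a dirty-log pass would harvest one segment (256 pages) at a
 * time with gmap_sync_dirty_log_pmd(). The helper called in the loop is
 * hypothetical; only the bitmap contract comes from the function above.
 */
#if 0	/* illustrative sketch */
static void example_harvest_segment(struct gmap *gmap, unsigned long gaddr,
				    unsigned long vmaddr)
{
	unsigned long bitmap[4] = { 0 };
	int i;

	/* collect and reset the dirty state of all pages in the segment */
	gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
	for (i = 0; i < _PAGE_ENTRIES; i++)
		if (test_bit(i, bitmap))
			example_mark_guest_page_dirty(gaddr + i * PAGE_SIZE);
}
#endif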

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				    unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static const struct mm_walk_ops thp_split_walk_ops = {
	.pmd_entry	= thp_split_walk_pmd_entry,
};

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &thp_split_walk_ops, NULL);
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was disabled.
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static const struct mm_walk_ops zap_zero_walk_ops = {
	.pmd_entry	= __zap_zero_pages,
};

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	mmap_write_lock(mm);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
	mmap_write_unlock(mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
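
/*
 * Example: a hypervisor enables pgstes once, early during VM creation,
 * before any guest memory is mapped. Minimal sketch of such a call site
 * (surrounding flow assumed, not taken from this file):
 */
#if 0	/* illustrative sketch */
	rc = s390_enable_sie();
	if (rc)
		goto out_err;	/* e.g. -EINVAL when 2K page tables are in use */
#endif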

/*
 * Disable KSM merging on all VMAs of the mm and make sure new mappings
 * start out unmergeable as well.
 */
int gmap_mark_unmergeable(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int ret;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  MADV_UNMERGEABLE, &vma->vm_flags);
		if (ret)
			return ret;
	}
	mm->def_flags &= ~VM_MERGEABLE;
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

/*
 * Give a chance to schedule after setting a key to 256 pages.
 * We only hold the mm lock, which is a rwsem and the kvm srcu.
 * Both can sleep.
 */
static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	cond_resched();
	return 0;
}

static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
				      unsigned long hmask, unsigned long next,
				      struct mm_walk *walk)
{
	pmd_t *pmd = (pmd_t *)pte;
	unsigned long start, end;
	struct page *page = pmd_page(*pmd);

	/*
	 * The write check makes sure we do not set a key on shared
	 * memory. This is needed as the walker does not differentiate
	 * between actual guest memory and the process executable or
	 * shared libraries.
	 */
	if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
	    !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
		return 0;

	start = pmd_val(*pmd) & HPAGE_MASK;
	end = start + HPAGE_SIZE - 1;
	__storage_key_init_range(start, end);
	set_bit(PG_arch_1, &page->flags);
	cond_resched();
	return 0;
}

static const struct mm_walk_ops enable_skey_walk_ops = {
	.hugetlb_entry		= __s390_enable_skey_hugetlb,
	.pte_entry		= __s390_enable_skey_pte,
	.pmd_entry		= __s390_enable_skey_pmd,
};

int s390_enable_skey(void)
{
	struct mm_struct *mm = current->mm;
	int rc = 0;

	mmap_write_lock(mm);
	if (mm_uses_skeys(mm))
		goto out_up;

	mm->context.uses_skeys = 1;
	rc = gmap_mark_unmergeable();
	if (rc) {
		mm->context.uses_skeys = 0;
		goto out_up;
	}
	walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);

out_up:
	mmap_write_unlock(mm);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
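
/*
 * Example: storage keys are typically enabled lazily, on the first
 * key-related guest instruction. Sketch of such an intercept handler;
 * the surrounding helper names are hypothetical.
 */
#if 0	/* illustrative sketch */
static int example_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;

	/* first key access: convert the whole address space once */
	rc = s390_enable_skey();
	if (rc)
		return rc;
	/* from here on, mm_uses_skeys(vcpu->kvm->mm) is true */
	return example_retry_instruction(vcpu);
}
#endif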

/*
 * Zap unused pages and reset the CMMA usage state in the pgste, making
 * all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

static const struct mm_walk_ops reset_cmma_walk_ops = {
	.pte_entry	= __s390_reset_cmma,
};

void s390_reset_cmma(struct mm_struct *mm)
{
	mmap_write_lock(mm);
	walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
	mmap_write_unlock(mm);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
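
/*
 * Example: the CMMA state is cleared when the guest requests a reset, so
 * that it starts over with all pages stable. Minimal sketch of a caller,
 * assuming a KVM-style VM structure:
 */
#if 0	/* illustrative sketch */
	if (kvm->arch.use_cmma)
		s390_reset_cmma(kvm->arch.gmap->mm);
#endif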

#define GATHER_GET_PAGES 32

struct reset_walk_state {
	unsigned long next;
	unsigned long count;
	unsigned long pfns[GATHER_GET_PAGES];
};

static int s390_gather_pages(pte_t *ptep, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	struct reset_walk_state *p = walk->private;
	pte_t pte = READ_ONCE(*ptep);

	if (pte_present(pte)) {
		/* we have a reference from the mapping, take an extra one */
		get_page(phys_to_page(pte_val(pte)));
		p->pfns[p->count] = phys_to_pfn(pte_val(pte));
		p->next = next;
		p->count++;
	}
	return p->count >= GATHER_GET_PAGES;
}

static const struct mm_walk_ops gather_pages_ops = {
	.pte_entry	= s390_gather_pages,
};
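
/*
 * Design note: a positive return value from a pagewalk callback stops
 * walk_page_range() and is passed through as its return value, while 0
 * continues the walk. s390_gather_pages() uses this to stop after
 * GATHER_GET_PAGES entries, so a caller can drop the mmap lock, process
 * the batch, and resume at reset_walk_state.next:
 */
#if 0	/* illustrative sketch */
	struct reset_walk_state state = { .next = start };
	int r;

	/* r > 0: batch full, more pages may follow; r == 0: range done */
	r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state);
#endif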

/**
 * s390_uv_destroy_pfns - Destroy a list of pages, given by their pfns
 * @count: the number of pages
 * @pfns: the array of page frame numbers of the pages to be destroyed
 */
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)
{
	unsigned long i;

	for (i = 0; i < count; i++) {
		/* destroy the secure page via the ultravisor */
		uv_destroy_owned_page(pfn_to_phys(pfns[i]));
		/* get rid of the extra reference */
		put_page(pfn_to_page(pfns[i]));
		cond_resched();
	}
}
EXPORT_SYMBOL_GPL(s390_uv_destroy_pfns);

/**
 * __s390_uv_destroy_range - Call the destroy secure page UVC on each page
 * in the given range of the given address space.
 * @mm: the mm to operate on
 * @start: the start of the range
 * @end: the end of the range
 * @interruptible: if not 0, stop when a fatal signal is received
 *
 * Walk the given range of the given address space and call the destroy
 * secure page UVC on each page. Optionally exit early if a fatal signal
 * is pending.
 *
 * Return: 0 on success, -EINTR if the function stopped before completing
 */
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end, bool interruptible)
{
	struct reset_walk_state state = { .next = start };
	int r = 1;

	while (r > 0) {
		state.count = 0;
		mmap_read_lock(mm);
		r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state);
		mmap_read_unlock(mm);
		cond_resched();
		s390_uv_destroy_pfns(state.count, state.pfns);
		if (interruptible && fatal_signal_pending(current))
			return -EINTR;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
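
/*
 * Example: destroying every secure page of a guest can take a long time,
 * which is why the loop above batches and reschedules. A caller tearing
 * down a protected guest might use it like this (sketch, error handling
 * and locking context assumed):
 */
#if 0	/* illustrative sketch */
	/* export/destroy all pages; allow a fatal signal to abort */
	if (__s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE, true))
		return -EINTR;
#endif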

/**
 * s390_unlist_old_asce - Remove the topmost level of page tables from the
 * list of page tables of the gmap.
 * @gmap: the gmap whose table is to be removed
 *
 * On s390x, KVM keeps a list of all pages containing the page tables of the
 * gmap (the CRST list). This list is used at tear down time to free all
 * pages that are now not needed anymore.
 *
 * This function removes the topmost page of the tree (the one pointed to by
 * the ASCE) from the CRST list.
 *
 * This means that it will not be freed when the VM is torn down, and needs
 * to be handled separately by the caller, unless a leak is actually
 * intended. Notice that this function will only remove the page from the
 * list, the page will still be used as a top level page table (and ASCE).
 */
void s390_unlist_old_asce(struct gmap *gmap)
{
	struct page *old;

	old = virt_to_page(gmap->table);
	spin_lock(&gmap->guest_table_lock);
	list_del(&old->lru);
	/*
	 * Sometimes the topmost page might need to be "removed" multiple
	 * times, for example if the VM is rebooted into secure mode several
	 * times concurrently, or if s390_replace_asce fails after calling
	 * s390_unlist_old_asce and is attempted again later. In that case
	 * the old asce has been removed from the list, and therefore it
	 * will not be freed when the VM terminates, but the ASCE is still
	 * in use and still pointed to.
	 * A subsequent call to replace_asce will follow the pointer and try
	 * to remove the same page from the list again.
	 * Therefore it's necessary that the page of the ASCE has valid
	 * pointers, so list_del can work (and do nothing) without
	 * dereferencing stale or invalid pointers.
	 */
	INIT_LIST_HEAD(&old->lru);
	spin_unlock(&gmap->guest_table_lock);
}
EXPORT_SYMBOL_GPL(s390_unlist_old_asce);

/**
 * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
 * @gmap: the gmap whose ASCE needs to be replaced
 *
 * If the allocation of the new top level page table fails, the ASCE is not
 * replaced.
 * In any case, the old ASCE is always removed from the gmap CRST list.
 * Therefore the caller has to make sure to save a pointer to it
 * beforehand, unless a leak is actually intended.
 */
int s390_replace_asce(struct gmap *gmap)
{
	unsigned long asce;
	struct page *page;
	void *table;

	s390_unlist_old_asce(gmap);

	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	table = page_to_virt(page);
	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));

	/*
	 * The caller has to deal with the old ASCE, but here we make sure
	 * the new one is properly added to the CRST list, so that
	 * it will be freed when the VM is torn down.
	 */
	spin_lock(&gmap->guest_table_lock);
	list_add(&page->lru, &gmap->crst_list);
	spin_unlock(&gmap->guest_table_lock);

	/* Set new table origin while preserving existing ASCE control bits */
	asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
	WRITE_ONCE(gmap->asce, asce);
	WRITE_ONCE(gmap->mm->context.gmap_asce, asce);
	WRITE_ONCE(gmap->table, table);

	return 0;
}
EXPORT_SYMBOL_GPL(s390_replace_asce);
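
/*
 * Example: the unlist/replace pair above supports rebooting a protected
 * guest: the old top level table must stay alive for the ultravisor, so
 * it is unlisted and deliberately kept while the gmap continues with a
 * fresh copy. Sketch of the intended calling pattern (cleanup details
 * are hypothetical):
 */
#if 0	/* illustrative sketch */
	struct page *old_table = virt_to_page(gmap->table);

	/* after this, old_table is no longer on the CRST list */
	if (s390_replace_asce(gmap))
		return -ENOMEM;
	/* ... later, once the ultravisor is done with the old table ... */
	__free_pages(old_table, CRST_ALLOC_ORDER);
#endif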