// SPDX-License-Identifier: GPL-2.0
/*
 *  KVM guest address space mapping code
 *
 *    Copyright IBM Corp. 2007, 2020
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *       David Hildenbrand <david@redhat.com>
 *       Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
    struct gmap *gmap;
    struct page *page;
    unsigned long *table;
    unsigned long etype, atype;

    if (limit < _REGION3_SIZE) {
        limit = _REGION3_SIZE - 1;
        atype = _ASCE_TYPE_SEGMENT;
        etype = _SEGMENT_ENTRY_EMPTY;
    } else if (limit < _REGION2_SIZE) {
        limit = _REGION2_SIZE - 1;
        atype = _ASCE_TYPE_REGION3;
        etype = _REGION3_ENTRY_EMPTY;
    } else if (limit < _REGION1_SIZE) {
        limit = _REGION1_SIZE - 1;
        atype = _ASCE_TYPE_REGION2;
        etype = _REGION2_ENTRY_EMPTY;
    } else {
        limit = -1UL;
        atype = _ASCE_TYPE_REGION1;
        etype = _REGION1_ENTRY_EMPTY;
    }
    gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
    if (!gmap)
        goto out;
    INIT_LIST_HEAD(&gmap->crst_list);
    INIT_LIST_HEAD(&gmap->children);
    INIT_LIST_HEAD(&gmap->pt_list);
    INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
    INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
    INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
    spin_lock_init(&gmap->guest_table_lock);
    spin_lock_init(&gmap->shadow_lock);
    refcount_set(&gmap->ref_count, 1);
    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    if (!page)
        goto out_free;
    page->index = 0;
    list_add(&page->lru, &gmap->crst_list);
    table = (unsigned long *) page_to_phys(page);
    crst_table_init(table, etype);
    gmap->table = table;
    gmap->asce = atype | _ASCE_TABLE_LENGTH |
        _ASCE_USER_BITS | __pa(table);
    gmap->asce_end = limit;
    return gmap;

out_free:
    kfree(gmap);
out:
    return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
    struct gmap *gmap;
    unsigned long gmap_asce;

    gmap = gmap_alloc(limit);
    if (!gmap)
        return NULL;
    gmap->mm = mm;
    spin_lock(&mm->context.lock);
    list_add_rcu(&gmap->list, &mm->context.gmap_list);
    if (list_is_singular(&mm->context.gmap_list))
        gmap_asce = gmap->asce;
    else
        gmap_asce = -1UL;
    WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
    spin_unlock(&mm->context.lock);
    return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
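
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * user such as KVM creates one gmap per virtual machine on top of the
 * userspace process' mm and tears it down again with gmap_remove(). The
 * 4 TB limit below is only an assumed value for illustration:
 *
 *      struct gmap *gmap;
 *
 *      gmap = gmap_create(current->mm, (1UL << 42) - 1);
 *      if (!gmap)
 *              return -ENOMEM;
 *      ...
 *      gmap_remove(gmap);
 */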

static void gmap_flush_tlb(struct gmap *gmap)
{
    if (MACHINE_HAS_IDTE)
        __tlb_flush_idte(gmap->asce);
    else
        __tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
    struct radix_tree_iter iter;
    unsigned long indices[16];
    unsigned long index;
    void __rcu **slot;
    int i, nr;

    /* A radix tree is freed by deleting all of its entries */
    index = 0;
    do {
        nr = 0;
        radix_tree_for_each_slot(slot, root, &iter, index) {
            indices[nr] = iter.index;
            if (++nr == 16)
                break;
        }
        for (i = 0; i < nr; i++) {
            index = indices[i];
            radix_tree_delete(root, index);
        }
    } while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
    struct gmap_rmap *rmap, *rnext, *head;
    struct radix_tree_iter iter;
    unsigned long indices[16];
    unsigned long index;
    void __rcu **slot;
    int i, nr;

    /* A radix tree is freed by deleting all of its entries */
    index = 0;
    do {
        nr = 0;
        radix_tree_for_each_slot(slot, root, &iter, index) {
            indices[nr] = iter.index;
            if (++nr == 16)
                break;
        }
        for (i = 0; i < nr; i++) {
            index = indices[i];
            head = radix_tree_delete(root, index);
            gmap_for_each_rmap_safe(rmap, rnext, head)
                kfree(rmap);
        }
    } while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
    struct page *page, *next;

    /* Flush tlb of all gmaps (if not already done for shadows) */
    if (!(gmap_is_shadow(gmap) && gmap->removed))
        gmap_flush_tlb(gmap);
    /* Free all segment & region tables. */
    list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
        __free_pages(page, CRST_ALLOC_ORDER);
    gmap_radix_tree_free(&gmap->guest_to_host);
    gmap_radix_tree_free(&gmap->host_to_guest);

    /* Free additional data for a shadow gmap */
    if (gmap_is_shadow(gmap)) {
        /* Free all page tables. */
        list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
            page_table_free_pgste(page);
        gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
        /* Release reference to the parent */
        gmap_put(gmap->parent);
    }

    kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
    refcount_inc(&gmap->ref_count);
    return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
    if (refcount_dec_and_test(&gmap->ref_count))
        gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
    struct gmap *sg, *next;
    unsigned long gmap_asce;

    /* Remove all shadow gmaps linked to this gmap */
    if (!list_empty(&gmap->children)) {
        spin_lock(&gmap->shadow_lock);
        list_for_each_entry_safe(sg, next, &gmap->children, list) {
            list_del(&sg->list);
            gmap_put(sg);
        }
        spin_unlock(&gmap->shadow_lock);
    }
    /* Remove gmap from the per-mm list */
    spin_lock(&gmap->mm->context.lock);
    list_del_rcu(&gmap->list);
    if (list_empty(&gmap->mm->context.gmap_list))
        gmap_asce = 0;
    else if (list_is_singular(&gmap->mm->context.gmap_list))
        gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
                         struct gmap, list)->asce;
    else
        gmap_asce = -1UL;
    WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
    spin_unlock(&gmap->mm->context.lock);
    synchronize_rcu();
    /* Put reference */
    gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
    S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
    S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap, or NULL if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
    return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);

/*
 * gmap_alloc_table is assumed to be called with mmap_lock held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
                unsigned long init, unsigned long gaddr)
{
    struct page *page;
    unsigned long *new;

    /* since we don't free the gmap table until gmap_free we can unlock */
    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    if (!page)
        return -ENOMEM;
    new = (unsigned long *) page_to_phys(page);
    crst_table_init(new, init);
    spin_lock(&gmap->guest_table_lock);
    if (*table & _REGION_ENTRY_INVALID) {
        list_add(&page->lru, &gmap->crst_list);
        *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
            (*table & _REGION_ENTRY_TYPE_MASK);
        page->index = gaddr;
        page = NULL;
    }
    spin_unlock(&gmap->guest_table_lock);
    if (page)
        __free_pages(page, CRST_ALLOC_ORDER);
    return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
    struct page *page;
    unsigned long offset, mask;

    offset = (unsigned long) entry / sizeof(unsigned long);
    offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
    mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
    page = virt_to_page((void *)((unsigned long) entry & mask));
    return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
    unsigned long *entry;
    int flush = 0;

    BUG_ON(gmap_is_shadow(gmap));
    spin_lock(&gmap->guest_table_lock);
    entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
    if (entry) {
        flush = (*entry != _SEGMENT_ENTRY_EMPTY);
        *entry = _SEGMENT_ENTRY_EMPTY;
    }
    spin_unlock(&gmap->guest_table_lock);
    return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
    unsigned long vmaddr;

    vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
                           gaddr >> PMD_SHIFT);
    return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
    unsigned long off;
    int flush;

    BUG_ON(gmap_is_shadow(gmap));
    if ((to | len) & (PMD_SIZE - 1))
        return -EINVAL;
    if (len == 0 || to + len < to)
        return -EINVAL;

    flush = 0;
    mmap_write_lock(gmap->mm);
    for (off = 0; off < len; off += PMD_SIZE)
        flush |= __gmap_unmap_by_gaddr(gmap, to + off);
    mmap_write_unlock(gmap->mm);
    if (flush)
        gmap_flush_tlb(gmap);
    return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
             unsigned long to, unsigned long len)
{
    unsigned long off;
    int flush;

    BUG_ON(gmap_is_shadow(gmap));
    if ((from | to | len) & (PMD_SIZE - 1))
        return -EINVAL;
    if (len == 0 || from + len < from || to + len < to ||
        from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
        return -EINVAL;

    flush = 0;
    mmap_write_lock(gmap->mm);
    for (off = 0; off < len; off += PMD_SIZE) {
        /* Remove old translation */
        flush |= __gmap_unmap_by_gaddr(gmap, to + off);
        /* Store new translation */
        if (radix_tree_insert(&gmap->guest_to_host,
                      (to + off) >> PMD_SHIFT,
                      (void *) from + off))
            break;
    }
    mmap_write_unlock(gmap->mm);
    if (flush)
        gmap_flush_tlb(gmap);
    if (off >= len)
        return 0;
    gmap_unmap_segment(gmap, to, len);
    return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
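
/*
 * Example (illustrative, with made-up variables): both @from and @to must
 * be aligned to the segment size (PMD_SIZE), as checked above. Mapping
 * 256 MB of the parent address space at guest address 0 could look like:
 *
 *      rc = gmap_map_segment(gmap, vm_start, 0x0UL, 256UL << 20);
 *      if (rc)
 *              return rc;      // -EINVAL or -ENOMEM
 */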

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
    unsigned long vmaddr;

    vmaddr = (unsigned long)
        radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
    /* Note: guest_to_host is empty for a shadow gmap */
    return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
    unsigned long rc;

    mmap_read_lock(gmap->mm);
    rc = __gmap_translate(gmap, gaddr);
    mmap_read_unlock(gmap->mm);
    return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
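
/*
 * Example (illustrative): because the translated address is returned
 * in-band, error checking follows the IS_ERR_VALUE() convention:
 *
 *      vmaddr = gmap_translate(gmap, gaddr);
 *      if (IS_ERR_VALUE(vmaddr))
 *              return vmaddr;  // -EFAULT, no mapping exists
 */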

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
         unsigned long vmaddr)
{
    struct gmap *gmap;
    int flush;

    rcu_read_lock();
    list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
        flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
        if (flush)
            gmap_flush_tlb(gmap);
    }
    rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
               unsigned long gaddr);

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
    struct mm_struct *mm;
    unsigned long *table;
    spinlock_t *ptl;
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    u64 unprot;
    int rc;

    BUG_ON(gmap_is_shadow(gmap));
    /* Create higher level tables in the gmap page table */
    table = gmap->table;
    if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
        table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
        if ((*table & _REGION_ENTRY_INVALID) &&
            gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
                     gaddr & _REGION1_MASK))
            return -ENOMEM;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
    }
    if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
        table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
        if ((*table & _REGION_ENTRY_INVALID) &&
            gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
                     gaddr & _REGION2_MASK))
            return -ENOMEM;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
    }
    if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
        table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
        if ((*table & _REGION_ENTRY_INVALID) &&
            gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
                     gaddr & _REGION3_MASK))
            return -ENOMEM;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
    }
    table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
    /* Walk the parent mm page table */
    mm = gmap->mm;
    pgd = pgd_offset(mm, vmaddr);
    VM_BUG_ON(pgd_none(*pgd));
    p4d = p4d_offset(pgd, vmaddr);
    VM_BUG_ON(p4d_none(*p4d));
    pud = pud_offset(p4d, vmaddr);
    VM_BUG_ON(pud_none(*pud));
    /* large puds cannot yet be handled */
    if (pud_large(*pud))
        return -EFAULT;
    pmd = pmd_offset(pud, vmaddr);
    VM_BUG_ON(pmd_none(*pmd));
    /* Are we allowed to use huge pages? */
    if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
        return -EFAULT;
    /* Link gmap segment table entry location to page table. */
    rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
    if (rc)
        return rc;
    ptl = pmd_lock(mm, pmd);
    spin_lock(&gmap->guest_table_lock);
    if (*table == _SEGMENT_ENTRY_EMPTY) {
        rc = radix_tree_insert(&gmap->host_to_guest,
                       vmaddr >> PMD_SHIFT, table);
        if (!rc) {
            if (pmd_large(*pmd)) {
                *table = (pmd_val(*pmd) &
                      _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
                    | _SEGMENT_ENTRY_GMAP_UC;
            } else
                *table = pmd_val(*pmd) &
                    _SEGMENT_ENTRY_HARDWARE_BITS;
        }
    } else if (*table & _SEGMENT_ENTRY_PROTECT &&
           !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
        unprot = (u64)*table;
        unprot &= ~_SEGMENT_ENTRY_PROTECT;
        unprot |= _SEGMENT_ENTRY_GMAP_UC;
        gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
    }
    spin_unlock(&gmap->guest_table_lock);
    spin_unlock(ptl);
    radix_tree_preload_end();
    return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
           unsigned int fault_flags)
{
    unsigned long vmaddr;
    int rc;
    bool unlocked;

    mmap_read_lock(gmap->mm);

retry:
    unlocked = false;
    vmaddr = __gmap_translate(gmap, gaddr);
    if (IS_ERR_VALUE(vmaddr)) {
        rc = vmaddr;
        goto out_up;
    }
    if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
                 &unlocked)) {
        rc = -EFAULT;
        goto out_up;
    }
    /*
     * If fixup_user_fault() unlocked the mmap_lock during the fault-in,
     * redo __gmap_translate() to not race with a map/unmap_segment.
     */
    if (unlocked)
        goto retry;

    rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
    mmap_read_unlock(gmap->mm);
    return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
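
/*
 * Example (illustrative sketch, is_write is made up): this is the kind of
 * call a host fault handler makes after intercepting a guest page fault;
 * FAULT_FLAG_WRITE is passed down so fixup_user_fault() resolves write
 * access:
 *
 *      rc = gmap_fault(gmap, gaddr, is_write ? FAULT_FLAG_WRITE : 0);
 *      if (rc == -EFAULT)
 *              ...     // reflect an addressing exception to the guest
 */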

/*
 * this function is assumed to be called with mmap_lock held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
    struct vm_area_struct *vma;
    unsigned long vmaddr;
    spinlock_t *ptl;
    pte_t *ptep;

    /* Find the vm address for the guest address */
    vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
                           gaddr >> PMD_SHIFT);
    if (vmaddr) {
        vmaddr |= gaddr & ~PMD_MASK;

        vma = vma_lookup(gmap->mm, vmaddr);
        if (!vma || is_vm_hugetlb_page(vma))
            return;

        /* Get pointer to the page table entry */
        ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
        if (likely(ptep)) {
            ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
            pte_unmap_unlock(ptep, ptl);
        }
    }
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
    unsigned long gaddr, vmaddr, size;
    struct vm_area_struct *vma;

    mmap_read_lock(gmap->mm);
    for (gaddr = from; gaddr < to;
         gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
        /* Find the vm address for the guest address */
        vmaddr = (unsigned long)
            radix_tree_lookup(&gmap->guest_to_host,
                      gaddr >> PMD_SHIFT);
        if (!vmaddr)
            continue;
        vmaddr |= gaddr & ~PMD_MASK;
        /* Find vma in the parent mm */
        vma = find_vma(gmap->mm, vmaddr);
        if (!vma)
            continue;
        /*
         * We do not discard pages that are backed by
         * hugetlbfs, so we don't have to refault them.
         */
        if (is_vm_hugetlb_page(vma))
            continue;
        size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
        zap_page_range(vma, vmaddr, size);
    }
    mmap_read_unlock(gmap->mm);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
    spin_lock(&gmap_notifier_lock);
    list_add_rcu(&nb->list, &gmap_notifier_list);
    spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
    spin_lock(&gmap_notifier_lock);
    list_del_rcu(&nb->list);
    spin_unlock(&gmap_notifier_lock);
    synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
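
/*
 * Example (illustrative sketch, callback name made up): a notifier block
 * wires a callback into gmap_call_notifier() below; the callback runs for
 * guest ranges that were armed for notification and then changed:
 *
 *      static void my_gmap_notifier(struct gmap *gmap, unsigned long start,
 *                                   unsigned long end)
 *      {
 *              // e.g. kick vcpus out of SIE for this range
 *      }
 *
 *      static struct gmap_notifier my_nb = {
 *              .notifier_call = my_gmap_notifier,
 *      };
 *
 *      gmap_register_pte_notifier(&my_nb);
 */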

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
                   unsigned long end)
{
    struct gmap_notifier *nb;

    list_for_each_entry(nb, &gmap_notifier_list, list)
        nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
                         unsigned long gaddr, int level)
{
    const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
    unsigned long *table = gmap->table;

    if (gmap_is_shadow(gmap) && gmap->removed)
        return NULL;

    if (WARN_ON_ONCE(level > (asce_type >> 2) + 1))
        return NULL;

    if (asce_type != _ASCE_TYPE_REGION1 &&
        gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
        return NULL;

    switch (asce_type) {
    case _ASCE_TYPE_REGION1:
        table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
        if (level == 4)
            break;
        if (*table & _REGION_ENTRY_INVALID)
            return NULL;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        fallthrough;
    case _ASCE_TYPE_REGION2:
        table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
        if (level == 3)
            break;
        if (*table & _REGION_ENTRY_INVALID)
            return NULL;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        fallthrough;
    case _ASCE_TYPE_REGION3:
        table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
        if (level == 2)
            break;
        if (*table & _REGION_ENTRY_INVALID)
            return NULL;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        fallthrough;
    case _ASCE_TYPE_SEGMENT:
        table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
        if (level == 1)
            break;
        if (*table & _REGION_ENTRY_INVALID)
            return NULL;
        table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
    }
    return table;
}

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *            and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
                   spinlock_t **ptl)
{
    unsigned long *table;

    BUG_ON(gmap_is_shadow(gmap));
    /* Walk the gmap page table, lock and get pte pointer */
    table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
    if (!table || *table & _SEGMENT_ENTRY_INVALID)
        return NULL;
    return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
                 unsigned long vmaddr, int prot)
{
    struct mm_struct *mm = gmap->mm;
    unsigned int fault_flags;
    bool unlocked = false;

    BUG_ON(gmap_is_shadow(gmap));
    fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
    if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
        return -EFAULT;
    if (unlocked)
        /* lost mmap_lock, caller has to retry __gmap_translate */
        return 0;
    /* Connect the page tables */
    return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
    if (ptl)
        spin_unlock(ptl);
}

/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *            and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
    pmd_t *pmdp;

    BUG_ON(gmap_is_shadow(gmap));
    pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
    if (!pmdp)
        return NULL;

    /* without huge pages, there is no need to take the table lock */
    if (!gmap->mm->context.allow_gmap_hpage_1m)
        return pmd_none(*pmdp) ? NULL : pmdp;

    spin_lock(&gmap->guest_table_lock);
    if (pmd_none(*pmdp)) {
        spin_unlock(&gmap->guest_table_lock);
        return NULL;
    }

    /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
    if (!pmd_large(*pmdp))
        spin_unlock(&gmap->guest_table_lock);
    return pmdp;
}

/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
    if (pmd_large(*pmdp))
        spin_unlock(&gmap->guest_table_lock);
}

/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_lock in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
                pmd_t *pmdp, int prot, unsigned long bits)
{
    int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
    int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
    pmd_t new = *pmdp;

    /* Fixup needed */
    if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
        return -EAGAIN;

    if (prot == PROT_NONE && !pmd_i) {
        new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
        gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
    }

    if (prot == PROT_READ && !pmd_p) {
        new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
        new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_PROTECT));
        gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
    }

    if (bits & GMAP_NOTIFY_MPROT)
        set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));

    /* Shadow GMAP protection needs split PMDs */
    if (bits & GMAP_NOTIFY_SHADOW)
        return -EINVAL;

    return 0;
}

/*
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with sg->mm->mmap_lock in read
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
                pmd_t *pmdp, int prot, unsigned long bits)
{
    int rc;
    pte_t *ptep;
    spinlock_t *ptl = NULL;
    unsigned long pbits = 0;

    if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
        return -EAGAIN;

    ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
    if (!ptep)
        return -ENOMEM;

    pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
    pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
    /* Protect and unlock. */
    rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
    gmap_pte_op_end(ptl);
    return rc;
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_lock in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
                  unsigned long len, int prot, unsigned long bits)
{
    unsigned long vmaddr, dist;
    pmd_t *pmdp;
    int rc;

    BUG_ON(gmap_is_shadow(gmap));
    while (len) {
        rc = -EAGAIN;
        pmdp = gmap_pmd_op_walk(gmap, gaddr);
        if (pmdp) {
            if (!pmd_large(*pmdp)) {
                rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
                              bits);
                if (!rc) {
                    len -= PAGE_SIZE;
                    gaddr += PAGE_SIZE;
                }
            } else {
                rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
                              bits);
                if (!rc) {
                    dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
                    len = len < dist ? 0 : len - dist;
                    gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
                }
            }
            gmap_pmd_op_end(gmap, pmdp);
        }
        if (rc) {
            if (rc == -EINVAL)
                return rc;

            /* -EAGAIN, fixup of userspace mm and gmap */
            vmaddr = __gmap_translate(gmap, gaddr);
            if (IS_ERR_VALUE(vmaddr))
                return vmaddr;
            rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
            if (rc)
                return rc;
        }
    }
    return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
             unsigned long len, int prot)
{
    int rc;

    if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
        return -EINVAL;
    if (!MACHINE_HAS_ESOP && prot == PROT_READ)
        return -EINVAL;
    mmap_read_lock(gmap->mm);
    rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
    mmap_read_unlock(gmap->mm);
    return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
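
/*
 * Example (illustrative): arming the notifier for a single guest page;
 * after this, the next write access to that page first triggers the
 * callbacks registered with gmap_register_pte_notifier() above:
 *
 *      rc = gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
 *                                PROT_READ);
 */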

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_lock in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
    unsigned long address, vmaddr;
    spinlock_t *ptl;
    pte_t *ptep, pte;
    int rc;

    if (gmap_is_shadow(gmap))
        return -EINVAL;

    while (1) {
        rc = -EAGAIN;
        ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
        if (ptep) {
            pte = *ptep;
            if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
                address = pte_val(pte) & PAGE_MASK;
                address += gaddr & ~PAGE_MASK;
                *val = *(unsigned long *) address;
                set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
                /* Do *NOT* clear the _PAGE_INVALID bit! */
                rc = 0;
            }
            gmap_pte_op_end(ptl);
        }
        if (!rc)
            break;
        vmaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(vmaddr)) {
            rc = vmaddr;
            break;
        }
        rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
        if (rc)
            break;
    }
    return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
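
/*
 * Example (illustrative, variables made up): reading one guest-absolute
 * word, e.g. a DAT table entry owned by the guest, without marking the
 * backing page referenced:
 *
 *      unsigned long entry;
 *
 *      rc = gmap_read_table(gmap, gaddr & ~7UL, &entry);
 *      if (rc)
 *              return rc;      // -EFAULT, -ENOMEM or -EINVAL
 */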

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
                    struct gmap_rmap *rmap)
{
    struct gmap_rmap *temp;
    void __rcu **slot;

    BUG_ON(!gmap_is_shadow(sg));
    slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
    if (slot) {
        rmap->next = radix_tree_deref_slot_protected(slot,
                            &sg->guest_table_lock);
        for (temp = rmap->next; temp; temp = temp->next) {
            if (temp->raddr == rmap->raddr) {
                kfree(rmap);
                return;
            }
        }
        radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
    } else {
        rmap->next = NULL;
        radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
                  rmap);
    }
}

/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
                 unsigned long paddr, unsigned long len)
{
    struct gmap *parent;
    struct gmap_rmap *rmap;
    unsigned long vmaddr;
    spinlock_t *ptl;
    pte_t *ptep;
    int rc;

    BUG_ON(!gmap_is_shadow(sg));
    parent = sg->parent;
    while (len) {
        vmaddr = __gmap_translate(parent, paddr);
        if (IS_ERR_VALUE(vmaddr))
            return vmaddr;
        rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
        if (!rmap)
            return -ENOMEM;
        rmap->raddr = raddr;
        rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
        if (rc) {
            kfree(rmap);
            return rc;
        }
        rc = -EAGAIN;
        ptep = gmap_pte_op_walk(parent, paddr, &ptl);
        if (ptep) {
            spin_lock(&sg->guest_table_lock);
            rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
                         PGSTE_VSIE_BIT);
            if (!rc)
                gmap_insert_rmap(sg, vmaddr, rmap);
            spin_unlock(&sg->guest_table_lock);
            gmap_pte_op_end(ptl);
        }
        radix_tree_preload_end();
        if (rc) {
            kfree(rmap);
            rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
            if (rc)
                return rc;
            continue;
        }
        paddr += PAGE_SIZE;
        len -= PAGE_SIZE;
    }
    return 0;
}

#define _SHADOW_RMAP_MASK   0x7
#define _SHADOW_RMAP_REGION1    0x5
#define _SHADOW_RMAP_REGION2    0x4
#define _SHADOW_RMAP_REGION3    0x3
#define _SHADOW_RMAP_SEGMENT    0x2
#define _SHADOW_RMAP_PGTABLE    0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
    asm volatile(
        "   idte    %0,0,%1"
        : : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
    unsigned long *table;

    BUG_ON(!gmap_is_shadow(sg));
    table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
    if (!table || *table & _PAGE_INVALID)
        return;
    gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
    ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
                unsigned long *pgt)
{
    int i;

    BUG_ON(!gmap_is_shadow(sg));
    for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
        pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
    unsigned long sto, *ste, *pgt;
    struct page *page;

    BUG_ON(!gmap_is_shadow(sg));
    ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
    if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
        return;
    gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
    sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
    gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
    pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
    *ste = _SEGMENT_ENTRY_EMPTY;
    __gmap_unshadow_pgt(sg, raddr, pgt);
    /* Free page table */
    page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
    list_del(&page->lru);
    page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
                unsigned long *sgt)
{
    unsigned long *pgt;
    struct page *page;
    int i;

    BUG_ON(!gmap_is_shadow(sg));
    for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
        if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
            continue;
        pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
        sgt[i] = _SEGMENT_ENTRY_EMPTY;
        __gmap_unshadow_pgt(sg, raddr, pgt);
        /* Free page table */
        page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
        list_del(&page->lru);
        page_table_free_pgste(page);
    }
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
    unsigned long r3o, *r3e, *sgt;
    struct page *page;

    BUG_ON(!gmap_is_shadow(sg));
    r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
    if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
        return;
    gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
    r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
    gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
    sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
    *r3e = _REGION3_ENTRY_EMPTY;
    __gmap_unshadow_sgt(sg, raddr, sgt);
    /* Free segment table */
    page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
    list_del(&page->lru);
    __free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
                unsigned long *r3t)
{
    unsigned long *sgt;
    struct page *page;
    int i;

    BUG_ON(!gmap_is_shadow(sg));
    for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
        if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
            continue;
        sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
        r3t[i] = _REGION3_ENTRY_EMPTY;
        __gmap_unshadow_sgt(sg, raddr, sgt);
        /* Free segment table */
        page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, CRST_ALLOC_ORDER);
    }
}

1443 /**
1444  * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
1445  * @sg: pointer to the shadow guest address space structure
1446  * @raddr: rmap address in the shadow guest address space
1447  *
1448  * Called with the sg->guest_table_lock
1449  */
1450 static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
1451 {
1452     unsigned long r2o, *r2e, *r3t;
1453     struct page *page;
1454 
1455     BUG_ON(!gmap_is_shadow(sg));
1456     r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
1457     if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
1458         return;
1459     gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
1460     r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
1461     gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
1462     r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
1463     *r2e = _REGION2_ENTRY_EMPTY;
1464     __gmap_unshadow_r3t(sg, raddr, r3t);
1465     /* Free region 3 table */
1466     page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1467     list_del(&page->lru);
1468     __free_pages(page, CRST_ALLOC_ORDER);
1469 }
1470 
1471 /**
1472  * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
1473  * @sg: pointer to the shadow guest address space structure
1474  * @raddr: rmap address in the shadow guest address space
1475  * @r2t: pointer to the start of a shadow region-2 table
1476  *
1477  * Called with the sg->guest_table_lock
1478  */
1479 static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
1480                 unsigned long *r2t)
1481 {
1482     unsigned long *r3t;
1483     struct page *page;
1484     int i;
1485 
1486     BUG_ON(!gmap_is_shadow(sg));
1487     for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
1488         if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
1489             continue;
1490         r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
1491         r2t[i] = _REGION2_ENTRY_EMPTY;
1492         __gmap_unshadow_r3t(sg, raddr, r3t);
1493         /* Free region 3 table */
1494         page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1495         list_del(&page->lru);
1496         __free_pages(page, CRST_ALLOC_ORDER);
1497     }
1498 }
1499 
1500 /**
1501  * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
1502  * @sg: pointer to the shadow guest address space structure
1503  * @raddr: rmap address in the shadow guest address space
1504  *
1505  * Called with the sg->guest_table_lock
1506  */
1507 static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
1508 {
1509     unsigned long r1o, *r1e, *r2t;
1510     struct page *page;
1511 
1512     BUG_ON(!gmap_is_shadow(sg));
1513     r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
1514     if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
1515         return;
1516     gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
1517     r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
1518     gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
1519     r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
1520     *r1e = _REGION1_ENTRY_EMPTY;
1521     __gmap_unshadow_r2t(sg, raddr, r2t);
1522     /* Free region 2 table */
1523     page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1524     list_del(&page->lru);
1525     __free_pages(page, CRST_ALLOC_ORDER);
1526 }
1527 
1528 /**
1529  * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
1530  * @sg: pointer to the shadow guest address space structure
1531  * @raddr: rmap address in the shadow guest address space
1532  * @r1t: pointer to the start of a shadow region-1 table
1533  *
1534  * Called with the shadow->guest_table_lock
1535  */
1536 static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
1537                 unsigned long *r1t)
1538 {
1539     unsigned long asce, *r2t;
1540     struct page *page;
1541     int i;
1542 
1543     BUG_ON(!gmap_is_shadow(sg));
1544     asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
1545     for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
1546         if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
1547             continue;
1548         r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
1549         __gmap_unshadow_r2t(sg, raddr, r2t);
1550         /* Clear entry and flush translation r1t -> r2t */
1551         gmap_idte_one(asce, raddr);
1552         r1t[i] = _REGION1_ENTRY_EMPTY;
1553         /* Free region 2 table */
1554         page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1555         list_del(&page->lru);
1556         __free_pages(page, CRST_ALLOC_ORDER);
1557     }
1558 }
1559 
1560 /**
1561  * gmap_unshadow - remove a shadow page table completely
1562  * @sg: pointer to the shadow guest address space structure
1563  *
1564  * Called with sg->guest_table_lock held
1565  */
1566 static void gmap_unshadow(struct gmap *sg)
1567 {
1568     unsigned long *table;
1569 
1570     BUG_ON(!gmap_is_shadow(sg));
1571     if (sg->removed)
1572         return;
1573     sg->removed = 1;
1574     gmap_call_notifier(sg, 0, -1UL);
1575     gmap_flush_tlb(sg);
1576     table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
1577     switch (sg->asce & _ASCE_TYPE_MASK) {
1578     case _ASCE_TYPE_REGION1:
1579         __gmap_unshadow_r1t(sg, 0, table);
1580         break;
1581     case _ASCE_TYPE_REGION2:
1582         __gmap_unshadow_r2t(sg, 0, table);
1583         break;
1584     case _ASCE_TYPE_REGION3:
1585         __gmap_unshadow_r3t(sg, 0, table);
1586         break;
1587     case _ASCE_TYPE_SEGMENT:
1588         __gmap_unshadow_sgt(sg, 0, table);
1589         break;
1590     }
1591 }
1592 
1593 /**
1594  * gmap_find_shadow - find a specific asce in the list of shadow tables
1595  * @parent: pointer to the parent gmap
1596  * @asce: ASCE for which the shadow table is created
1597  * @edat_level: edat level to be used for the shadow translation
1598  *
1599  * Returns the pointer to a gmap if a shadow table with the given asce is
1600  * already available, ERR_PTR(-EAGAIN) if another one is just being created,
1601  * otherwise NULL
1602  */
1603 static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
1604                      int edat_level)
1605 {
1606     struct gmap *sg;
1607 
1608     list_for_each_entry(sg, &parent->children, list) {
1609         if (sg->orig_asce != asce || sg->edat_level != edat_level ||
1610             sg->removed)
1611             continue;
1612         if (!sg->initialized)
1613             return ERR_PTR(-EAGAIN);
1614         refcount_inc(&sg->ref_count);
1615         return sg;
1616     }
1617     return NULL;
1618 }
1619 
1620 /**
1621  * gmap_shadow_valid - check if a shadow guest address space matches the
1622  *                     given properties and is still valid
1623  * @sg: pointer to the shadow guest address space structure
1624  * @asce: ASCE for which the shadow table is requested
1625  * @edat_level: edat level to be used for the shadow translation
1626  *
1627  * Returns 1 if the gmap shadow is still valid and matches the given
1628  * properties; the caller can continue using it. Returns 0 otherwise; in
1629  * that case the caller has to request a new shadow gmap.
1630  *
1631  */
1632 int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
1633 {
1634     if (sg->removed)
1635         return 0;
1636     return sg->orig_asce == asce && sg->edat_level == edat_level;
1637 }
1638 EXPORT_SYMBOL_GPL(gmap_shadow_valid);
1639 
1640 /**
1641  * gmap_shadow - create/find a shadow guest address space
1642  * @parent: pointer to the parent gmap
1643  * @asce: ASCE for which the shadow table is created
1644  * @edat_level: edat level to be used for the shadow translation
1645  *
1646  * The pages of the top level page table referred to by the asce parameter
1647  * will be set to read-only and marked in the PGSTEs of the kvm process.
1648  * The shadow table will be removed automatically on any change to the
1649  * PTE mapping for the source table.
1650  *
1651  * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
1652  * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
1653  * parent gmap table could not be protected.
1654  */
1655 struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
1656              int edat_level)
1657 {
1658     struct gmap *sg, *new;
1659     unsigned long limit;
1660     int rc;
1661 
1662     BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
1663     BUG_ON(gmap_is_shadow(parent));
1664     spin_lock(&parent->shadow_lock);
1665     sg = gmap_find_shadow(parent, asce, edat_level);
1666     spin_unlock(&parent->shadow_lock);
1667     if (sg)
1668         return sg;
1669     /* Create a new shadow gmap; limit from asce type: segment 2G-1, region3 4T-1, region2 8P-1, region1 16E-1 */
1670     limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
1671     if (asce & _ASCE_REAL_SPACE)
1672         limit = -1UL;
1673     new = gmap_alloc(limit);
1674     if (!new)
1675         return ERR_PTR(-ENOMEM);
1676     new->mm = parent->mm;
1677     new->parent = gmap_get(parent);
1678     new->orig_asce = asce;
1679     new->edat_level = edat_level;
1680     new->initialized = false;
1681     spin_lock(&parent->shadow_lock);
1682     /* Recheck if another CPU created the same shadow */
1683     sg = gmap_find_shadow(parent, asce, edat_level);
1684     if (sg) {
1685         spin_unlock(&parent->shadow_lock);
1686         gmap_free(new);
1687         return sg;
1688     }
1689     if (asce & _ASCE_REAL_SPACE) {
1690         /* only allow one real-space gmap shadow */
1691         list_for_each_entry(sg, &parent->children, list) {
1692             if (sg->orig_asce & _ASCE_REAL_SPACE) {
1693                 spin_lock(&sg->guest_table_lock);
1694                 gmap_unshadow(sg);
1695                 spin_unlock(&sg->guest_table_lock);
1696                 list_del(&sg->list);
1697                 gmap_put(sg);
1698                 break;
1699             }
1700         }
1701     }
1702     refcount_set(&new->ref_count, 2);
1703     list_add(&new->list, &parent->children);
1704     if (asce & _ASCE_REAL_SPACE) {
1705         /* nothing to protect, return right away */
1706         new->initialized = true;
1707         spin_unlock(&parent->shadow_lock);
1708         return new;
1709     }
1710     spin_unlock(&parent->shadow_lock);
1711     /* protect after insertion, so it will get properly invalidated */
1712     mmap_read_lock(parent->mm);
1713     rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
1714                 ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
1715                 PROT_READ, GMAP_NOTIFY_SHADOW);
1716     mmap_read_unlock(parent->mm);
1717     spin_lock(&parent->shadow_lock);
1718     new->initialized = true;
1719     if (rc) {
1720         list_del(&new->list);
1721         gmap_free(new);
1722         new = ERR_PTR(rc);
1723     }
1724     spin_unlock(&parent->shadow_lock);
1725     return new;
1726 }
1727 EXPORT_SYMBOL_GPL(gmap_shadow);
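
/*
 * A minimal usage sketch (assumed caller, not part of the original file):
 * reuse an existing shadow if it still matches, otherwise request a new one.
 * A real user (e.g. VSIE) would resolve faults between -EAGAIN retries
 * instead of spinning like this simplified loop does.
 */
static struct gmap *get_shadow_sketch(struct gmap *parent, struct gmap *cur,
                      unsigned long asce, int edat_level)
{
    struct gmap *sg;

    if (cur && gmap_shadow_valid(cur, asce, edat_level))
        return cur;     /* still matches, keep using it */
    do {
        sg = gmap_shadow(parent, asce, edat_level);
    } while (sg == ERR_PTR(-EAGAIN));   /* another CPU is creating it */
    return sg;          /* shadow gmap or ERR_PTR() */
}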
1728 
1729 /**
1730  * gmap_shadow_r2t - create an empty shadow region 2 table
1731  * @sg: pointer to the shadow guest address space structure
1732  * @saddr: faulting address in the shadow gmap
1733  * @r2t: parent gmap address of the region 2 table to get shadowed
1734  * @fake: r2t references a contiguous guest memory block, not an r2t
1735  *
1736  * The r2t parameter specifies the address of the source table. The
1737  * four pages of the source table are made read-only in the parent gmap
1738  * address space. A write to the source table area @r2t will automatically
1739  * remove the shadow r2 table and all of its descendants.
1740  *
1741  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1742  * shadow table structure is incomplete, -ENOMEM if out of memory and
1743  * -EFAULT if an address in the parent gmap could not be resolved.
1744  *
1745  * Called with sg->mm->mmap_lock in read.
1746  */
1747 int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
1748             int fake)
1749 {
1750     unsigned long raddr, origin, offset, len;
1751     unsigned long *s_r2t, *table;
1752     struct page *page;
1753     int rc;
1754 
1755     BUG_ON(!gmap_is_shadow(sg));
1756     /* Allocate a shadow region second table */
1757     page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
1758     if (!page)
1759         return -ENOMEM;
1760     page->index = r2t & _REGION_ENTRY_ORIGIN;
1761     if (fake)
1762         page->index |= GMAP_SHADOW_FAKE_TABLE;
1763     s_r2t = (unsigned long *) page_to_phys(page);
1764     /* Install shadow region second table */
1765     spin_lock(&sg->guest_table_lock);
1766     table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
1767     if (!table) {
1768         rc = -EAGAIN;       /* Race with unshadow */
1769         goto out_free;
1770     }
1771     if (!(*table & _REGION_ENTRY_INVALID)) {
1772         rc = 0;         /* Already established */
1773         goto out_free;
1774     } else if (*table & _REGION_ENTRY_ORIGIN) {
1775         rc = -EAGAIN;       /* Race with shadow */
1776         goto out_free;
1777     }
1778     crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
1779     /* mark as invalid as long as the parent table is not protected */
1780     *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
1781          _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
1782     if (sg->edat_level >= 1)
1783         *table |= (r2t & _REGION_ENTRY_PROTECT);
1784     list_add(&page->lru, &sg->crst_list);
1785     if (fake) {
1786         /* nothing to protect for fake tables */
1787         *table &= ~_REGION_ENTRY_INVALID;
1788         spin_unlock(&sg->guest_table_lock);
1789         return 0;
1790     }
1791     spin_unlock(&sg->guest_table_lock);
1792     /* Make r2t read-only in parent gmap page table */
1793     raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
1794     origin = r2t & _REGION_ENTRY_ORIGIN;
1795     offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1796     len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1797     rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1798     spin_lock(&sg->guest_table_lock);
1799     if (!rc) {
1800         table = gmap_table_walk(sg, saddr, 4);
1801         if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1802                   (unsigned long) s_r2t)
1803             rc = -EAGAIN;       /* Race with unshadow */
1804         else
1805             *table &= ~_REGION_ENTRY_INVALID;
1806     } else {
1807         gmap_unshadow_r2t(sg, raddr);
1808     }
1809     spin_unlock(&sg->guest_table_lock);
1810     return rc;
1811 out_free:
1812     spin_unlock(&sg->guest_table_lock);
1813     __free_pages(page, CRST_ALLOC_ORDER);
1814     return rc;
1815 }
1816 EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
1817 
1818 /**
1819  * gmap_shadow_r3t - create a shadow region 3 table
1820  * @sg: pointer to the shadow guest address space structure
1821  * @saddr: faulting address in the shadow gmap
1822  * @r3t: parent gmap address of the region 3 table to get shadowed
1823  * @fake: r3t references a contiguous guest memory block, not an r3t
1824  *
1825  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1826  * shadow table structure is incomplete, -ENOMEM if out of memory and
1827  * -EFAULT if an address in the parent gmap could not be resolved.
1828  *
1829  * Called with sg->mm->mmap_lock in read.
1830  */
1831 int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
1832             int fake)
1833 {
1834     unsigned long raddr, origin, offset, len;
1835     unsigned long *s_r3t, *table;
1836     struct page *page;
1837     int rc;
1838 
1839     BUG_ON(!gmap_is_shadow(sg));
1840     /* Allocate a shadow region third table */
1841     page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
1842     if (!page)
1843         return -ENOMEM;
1844     page->index = r3t & _REGION_ENTRY_ORIGIN;
1845     if (fake)
1846         page->index |= GMAP_SHADOW_FAKE_TABLE;
1847     s_r3t = (unsigned long *) page_to_phys(page);
1848     /* Install shadow region third table */
1849     spin_lock(&sg->guest_table_lock);
1850     table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
1851     if (!table) {
1852         rc = -EAGAIN;       /* Race with unshadow */
1853         goto out_free;
1854     }
1855     if (!(*table & _REGION_ENTRY_INVALID)) {
1856         rc = 0;         /* Already established */
1857         goto out_free;
1858     } else if (*table & _REGION_ENTRY_ORIGIN) {
1859         rc = -EAGAIN;       /* Race with shadow */
1860         goto out_free;
1861     }
1862     crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
1863     /* mark as invalid as long as the parent table is not protected */
1864     *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
1865          _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
1866     if (sg->edat_level >= 1)
1867         *table |= (r3t & _REGION_ENTRY_PROTECT);
1868     list_add(&page->lru, &sg->crst_list);
1869     if (fake) {
1870         /* nothing to protect for fake tables */
1871         *table &= ~_REGION_ENTRY_INVALID;
1872         spin_unlock(&sg->guest_table_lock);
1873         return 0;
1874     }
1875     spin_unlock(&sg->guest_table_lock);
1876     /* Make r3t read-only in parent gmap page table */
1877     raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
1878     origin = r3t & _REGION_ENTRY_ORIGIN;
1879     offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1880     len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1881     rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1882     spin_lock(&sg->guest_table_lock);
1883     if (!rc) {
1884         table = gmap_table_walk(sg, saddr, 3);
1885         if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1886                   (unsigned long) s_r3t)
1887             rc = -EAGAIN;       /* Race with unshadow */
1888         else
1889             *table &= ~_REGION_ENTRY_INVALID;
1890     } else {
1891         gmap_unshadow_r3t(sg, raddr);
1892     }
1893     spin_unlock(&sg->guest_table_lock);
1894     return rc;
1895 out_free:
1896     spin_unlock(&sg->guest_table_lock);
1897     __free_pages(page, CRST_ALLOC_ORDER);
1898     return rc;
1899 }
1900 EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
1901 
1902 /**
1903  * gmap_shadow_sgt - create a shadow segment table
1904  * @sg: pointer to the shadow guest address space structure
1905  * @saddr: faulting address in the shadow gmap
1906  * @sgt: parent gmap address of the segment table to get shadowed
1907  * @fake: sgt references a contiguous guest memory block, not an sgt
1908  *
1909  * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1910  * shadow table structure is incomplete, -ENOMEM if out of memory and
1911  * -EFAULT if an address in the parent gmap could not be resolved.
1912  *
1913  * Called with sg->mm->mmap_lock in read.
1914  */
1915 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
1916             int fake)
1917 {
1918     unsigned long raddr, origin, offset, len;
1919     unsigned long *s_sgt, *table;
1920     struct page *page;
1921     int rc;
1922 
1923     BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
1924     /* Allocate a shadow segment table */
1925     page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
1926     if (!page)
1927         return -ENOMEM;
1928     page->index = sgt & _REGION_ENTRY_ORIGIN;
1929     if (fake)
1930         page->index |= GMAP_SHADOW_FAKE_TABLE;
1931     s_sgt = (unsigned long *) page_to_phys(page);
1932     /* Install shadow segment table */
1933     spin_lock(&sg->guest_table_lock);
1934     table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
1935     if (!table) {
1936         rc = -EAGAIN;       /* Race with unshadow */
1937         goto out_free;
1938     }
1939     if (!(*table & _REGION_ENTRY_INVALID)) {
1940         rc = 0;         /* Already established */
1941         goto out_free;
1942     } else if (*table & _REGION_ENTRY_ORIGIN) {
1943         rc = -EAGAIN;       /* Race with shadow */
1944         goto out_free;
1945     }
1946     crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
1947     /* mark as invalid as long as the parent table is not protected */
1948     *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
1949          _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
1950     if (sg->edat_level >= 1)
1951         *table |= sgt & _REGION_ENTRY_PROTECT;
1952     list_add(&page->lru, &sg->crst_list);
1953     if (fake) {
1954         /* nothing to protect for fake tables */
1955         *table &= ~_REGION_ENTRY_INVALID;
1956         spin_unlock(&sg->guest_table_lock);
1957         return 0;
1958     }
1959     spin_unlock(&sg->guest_table_lock);
1960     /* Make sgt read-only in parent gmap page table */
1961     raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
1962     origin = sgt & _REGION_ENTRY_ORIGIN;
1963     offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1964     len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1965     rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1966     spin_lock(&sg->guest_table_lock);
1967     if (!rc) {
1968         table = gmap_table_walk(sg, saddr, 2);
1969         if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1970                   (unsigned long) s_sgt)
1971             rc = -EAGAIN;       /* Race with unshadow */
1972         else
1973             *table &= ~_REGION_ENTRY_INVALID;
1974     } else {
1975         gmap_unshadow_sgt(sg, raddr);
1976     }
1977     spin_unlock(&sg->guest_table_lock);
1978     return rc;
1979 out_free:
1980     spin_unlock(&sg->guest_table_lock);
1981     __free_pages(page, CRST_ALLOC_ORDER);
1982     return rc;
1983 }
1984 EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
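
/*
 * Sketch of how the gmap_shadow_{r2t,r3t,sgt}() family is driven (assumed
 * fault-handler shape; read_guest_entry() is hypothetical): on a fault the
 * guest tables are walked top-down, instantiating each missing shadow level.
 * Depending on the asce type the walk starts at a lower level, and every
 * helper returns -EAGAIN on races, so the caller simply retries the fault.
 */
static int shadow_tables_sketch(struct gmap *sg, unsigned long saddr)
{
    unsigned long entry;
    int rc;

    rc = read_guest_entry(sg->parent, saddr, 4, &entry);    /* hypothetical */
    if (!rc)
        rc = gmap_shadow_r2t(sg, saddr, entry, 0);
    if (!rc)
        rc = read_guest_entry(sg->parent, saddr, 3, &entry);
    if (!rc)
        rc = gmap_shadow_r3t(sg, saddr, entry, 0);
    if (!rc)
        rc = read_guest_entry(sg->parent, saddr, 2, &entry);
    if (!rc)
        rc = gmap_shadow_sgt(sg, saddr, entry, 0);
    return rc;  /* 0, -EAGAIN (retry), -ENOMEM or -EFAULT */
}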
1985 
1986 /**
1987  * gmap_shadow_pgt_lookup - find a shadow page table
1988  * @sg: pointer to the shadow guest address space structure
1989  * @saddr: the address in the shadow guest address space
1990  * @pgt: parent gmap address of the page table to get shadowed
1991  * @dat_protection: if the pgtable is marked as protected by dat
1992  * @fake: pgt references contiguous guest memory block, not a pgtable
1993  *
1994  * Returns 0 if the shadow page table was found and -EAGAIN if the page
1995  * table was not found.
1996  *
1997  * Called with sg->mm->mmap_lock in read.
1998  */
1999 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
2000                unsigned long *pgt, int *dat_protection,
2001                int *fake)
2002 {
2003     unsigned long *table;
2004     struct page *page;
2005     int rc;
2006 
2007     BUG_ON(!gmap_is_shadow(sg));
2008     spin_lock(&sg->guest_table_lock);
2009     table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
2010     if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
2011         /* Shadow page tables are full pages (pte+pgste) */
2012         page = pfn_to_page(*table >> PAGE_SHIFT);
2013         *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
2014         *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
2015         *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
2016         rc = 0;
2017     } else  {
2018         rc = -EAGAIN;
2019     }
2020     spin_unlock(&sg->guest_table_lock);
2021     return rc;
2022 
2023 }
2024 EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
2025 
2026 /**
2027  * gmap_shadow_pgt - instantiate a shadow page table
2028  * @sg: pointer to the shadow guest address space structure
2029  * @saddr: faulting address in the shadow gmap
2030  * @pgt: parent gmap address of the page table to get shadowed
2031  * @fake: pgt references a contiguous guest memory block, not a pgtable
2032  *
2033  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2034  * shadow table structure is incomplete, -ENOMEM if out of memory and
2035  * -EFAULT if an address in the parent gmap could not be resolved.
2036  *
2037  * Called with sg->mm->mmap_lock in read.
2038  */
2039 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
2040             int fake)
2041 {
2042     unsigned long raddr, origin;
2043     unsigned long *s_pgt, *table;
2044     struct page *page;
2045     int rc;
2046 
2047     BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
2048     /* Allocate a shadow page table */
2049     page = page_table_alloc_pgste(sg->mm);
2050     if (!page)
2051         return -ENOMEM;
2052     page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
2053     if (fake)
2054         page->index |= GMAP_SHADOW_FAKE_TABLE;
2055     s_pgt = (unsigned long *) page_to_phys(page);
2056     /* Install shadow page table */
2057     spin_lock(&sg->guest_table_lock);
2058     table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
2059     if (!table) {
2060         rc = -EAGAIN;       /* Race with unshadow */
2061         goto out_free;
2062     }
2063     if (!(*table & _SEGMENT_ENTRY_INVALID)) {
2064         rc = 0;         /* Already established */
2065         goto out_free;
2066     } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
2067         rc = -EAGAIN;       /* Race with shadow */
2068         goto out_free;
2069     }
2070     /* mark as invalid as long as the parent table is not protected */
2071     *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
2072          (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
2073     list_add(&page->lru, &sg->pt_list);
2074     if (fake) {
2075         /* nothing to protect for fake tables */
2076         *table &= ~_SEGMENT_ENTRY_INVALID;
2077         spin_unlock(&sg->guest_table_lock);
2078         return 0;
2079     }
2080     spin_unlock(&sg->guest_table_lock);
2081     /* Make pgt read-only in parent gmap page table (not the pgste) */
2082     raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
2083     origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
2084     rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
2085     spin_lock(&sg->guest_table_lock);
2086     if (!rc) {
2087         table = gmap_table_walk(sg, saddr, 1);
2088         if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
2089                   (unsigned long) s_pgt)
2090             rc = -EAGAIN;       /* Race with unshadow */
2091         else
2092             *table &= ~_SEGMENT_ENTRY_INVALID;
2093     } else {
2094         gmap_unshadow_pgt(sg, raddr);
2095     }
2096     spin_unlock(&sg->guest_table_lock);
2097     return rc;
2098 out_free:
2099     spin_unlock(&sg->guest_table_lock);
2100     page_table_free_pgste(page);
2101     return rc;
2102 
2103 }
2104 EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
2105 
2106 /**
2107  * gmap_shadow_page - create a shadow page mapping
2108  * @sg: pointer to the shadow guest address space structure
2109  * @saddr: faulting address in the shadow gmap
2110  * @pte: pte in parent gmap address space to get shadowed
2111  *
2112  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2113  * shadow table structure is incomplete, -ENOMEM if out of memory and
2114  * -EFAULT if an address in the parent gmap could not be resolved.
2115  *
2116  * Called with sg->mm->mmap_lock in read.
2117  */
2118 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
2119 {
2120     struct gmap *parent;
2121     struct gmap_rmap *rmap;
2122     unsigned long vmaddr, paddr;
2123     spinlock_t *ptl;
2124     pte_t *sptep, *tptep;
2125     int prot;
2126     int rc;
2127 
2128     BUG_ON(!gmap_is_shadow(sg));
2129     parent = sg->parent;
2130     prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
2131 
2132     rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
2133     if (!rmap)
2134         return -ENOMEM;
2135     rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
2136 
2137     while (1) {
2138         paddr = pte_val(pte) & PAGE_MASK;
2139         vmaddr = __gmap_translate(parent, paddr);
2140         if (IS_ERR_VALUE(vmaddr)) {
2141             rc = vmaddr;
2142             break;
2143         }
2144         rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
2145         if (rc)
2146             break;
2147         rc = -EAGAIN;
2148         sptep = gmap_pte_op_walk(parent, paddr, &ptl);
2149         if (sptep) {
2150             spin_lock(&sg->guest_table_lock);
2151             /* Get page table pointer */
2152             tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
2153             if (!tptep) {
2154                 spin_unlock(&sg->guest_table_lock);
2155                 gmap_pte_op_end(ptl);
2156                 radix_tree_preload_end();
2157                 break;
2158             }
2159             rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
2160             if (rc > 0) {
2161                 /* Success and a new mapping */
2162                 gmap_insert_rmap(sg, vmaddr, rmap);
2163                 rmap = NULL;
2164                 rc = 0;
2165             }
2166             gmap_pte_op_end(ptl);
2167             spin_unlock(&sg->guest_table_lock);
2168         }
2169         radix_tree_preload_end();
2170         if (!rc)
2171             break;
2172         rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
2173         if (rc)
2174             break;
2175     }
2176     kfree(rmap);
2177     return rc;
2178 }
2179 EXPORT_SYMBOL_GPL(gmap_shadow_page);
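
/*
 * Sketch of the final level of shadow fault handling (assumed shape;
 * read_guest_pte() is hypothetical, the fake/block case is not handled
 * here): find the shadow page table, read the guest pte from the parent
 * and mirror it. The table protection reported by gmap_shadow_pgt_lookup()
 * is folded into the pte before shadowing.
 */
static int shadow_page_sketch(struct gmap *sg, unsigned long saddr)
{
    unsigned long pgt;
    int dat_protection, fake;
    pte_t pte;
    int rc;

    rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
    if (rc)
        return rc;  /* -EAGAIN: the table level is not shadowed yet */
    if (fake)
        return -EFAULT; /* fake tables are not handled in this sketch */
    rc = read_guest_pte(sg->parent, pgt, saddr, &pte); /* hypothetical */
    if (rc)
        return rc;
    if (dat_protection)
        pte = __pte(pte_val(pte) | _PAGE_PROTECT);
    return gmap_shadow_page(sg, saddr, pte);
}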
2180 
2181 /*
2182  * gmap_shadow_notify - handle notifications for shadow gmap
2183  *
2184  * Called with sg->parent->shadow_lock held.
2185  */
2186 static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
2187                    unsigned long gaddr)
2188 {
2189     struct gmap_rmap *rmap, *rnext, *head;
2190     unsigned long start, end, bits, raddr;
2191 
2192     BUG_ON(!gmap_is_shadow(sg));
2193 
2194     spin_lock(&sg->guest_table_lock);
2195     if (sg->removed) {
2196         spin_unlock(&sg->guest_table_lock);
2197         return;
2198     }
2199     /* Check for top level table */
2200     start = sg->orig_asce & _ASCE_ORIGIN;
2201     end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
2202     if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
2203         gaddr < end) {
2204         /* The complete shadow table has to go */
2205         gmap_unshadow(sg);
2206         spin_unlock(&sg->guest_table_lock);
2207         list_del(&sg->list);
2208         gmap_put(sg);
2209         return;
2210     }
2211     /* Remove the page table tree for one specific entry */
2212     head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
2213     gmap_for_each_rmap_safe(rmap, rnext, head) {
2214         bits = rmap->raddr & _SHADOW_RMAP_MASK;
2215         raddr = rmap->raddr ^ bits;
2216         switch (bits) {
2217         case _SHADOW_RMAP_REGION1:
2218             gmap_unshadow_r2t(sg, raddr);
2219             break;
2220         case _SHADOW_RMAP_REGION2:
2221             gmap_unshadow_r3t(sg, raddr);
2222             break;
2223         case _SHADOW_RMAP_REGION3:
2224             gmap_unshadow_sgt(sg, raddr);
2225             break;
2226         case _SHADOW_RMAP_SEGMENT:
2227             gmap_unshadow_pgt(sg, raddr);
2228             break;
2229         case _SHADOW_RMAP_PGTABLE:
2230             gmap_unshadow_page(sg, raddr);
2231             break;
2232         }
2233         kfree(rmap);
2234     }
2235     spin_unlock(&sg->guest_table_lock);
2236 }
2237 
2238 /**
2239  * ptep_notify - call all invalidation callbacks for a specific pte.
2240  * @mm: pointer to the process mm_struct
2241  * @vmaddr: virtual address in the process address space
2242  * @pte: pointer to the page table entry
2243  * @bits: bits from the pgste that caused the notify call
2244  *
2245  * This function is assumed to be called with the page table lock held
2246  * for the pte to notify.
2247  */
2248 void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
2249          pte_t *pte, unsigned long bits)
2250 {
2251     unsigned long offset, gaddr = 0;
2252     unsigned long *table;
2253     struct gmap *gmap, *sg, *next;
2254 
2255     offset = ((unsigned long) pte) & (255 * sizeof(pte_t)); /* pte index * 8 in the 2K page table */
2256     offset = offset * (PAGE_SIZE / sizeof(pte_t)); /* page offset within the 1M segment */
2257     rcu_read_lock();
2258     list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2259         spin_lock(&gmap->guest_table_lock);
2260         table = radix_tree_lookup(&gmap->host_to_guest,
2261                       vmaddr >> PMD_SHIFT);
2262         if (table)
2263             gaddr = __gmap_segment_gaddr(table) + offset;
2264         spin_unlock(&gmap->guest_table_lock);
2265         if (!table)
2266             continue;
2267 
2268         if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2269             spin_lock(&gmap->shadow_lock);
2270             list_for_each_entry_safe(sg, next,
2271                          &gmap->children, list)
2272                 gmap_shadow_notify(sg, vmaddr, gaddr);
2273             spin_unlock(&gmap->shadow_lock);
2274         }
2275         if (bits & PGSTE_IN_BIT)
2276             gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
2277     }
2278     rcu_read_unlock();
2279 }
2280 EXPORT_SYMBOL_GPL(ptep_notify);
2281 
2282 static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
2283                  unsigned long gaddr)
2284 {
2285     set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
2286     gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
2287 }
2288 
2289 /**
2290  * gmap_pmdp_xchg - exchange a gmap pmd with another
2291  * @gmap: pointer to the guest address space structure
2292  * @pmdp: pointer to the pmd entry
2293  * @new: replacement entry
2294  * @gaddr: the affected guest address
2295  *
2296  * This function is assumed to be called with the guest_table_lock
2297  * held.
2298  */
2299 static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
2300                unsigned long gaddr)
2301 {
2302     gaddr &= HPAGE_MASK;
2303     pmdp_notify_gmap(gmap, pmdp, gaddr);
2304     new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN));
2305     if (MACHINE_HAS_TLB_GUEST)
2306         __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
2307                 IDTE_GLOBAL);
2308     else if (MACHINE_HAS_IDTE)
2309         __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
2310     else
2311         __pmdp_csp(pmdp);
2312     set_pmd(pmdp, new);
2313 }
2314 
2315 static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
2316                 int purge)
2317 {
2318     pmd_t *pmdp;
2319     struct gmap *gmap;
2320     unsigned long gaddr;
2321 
2322     rcu_read_lock();
2323     list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2324         spin_lock(&gmap->guest_table_lock);
2325         pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
2326                           vmaddr >> PMD_SHIFT);
2327         if (pmdp) {
2328             gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
2329             pmdp_notify_gmap(gmap, pmdp, gaddr);
2330             WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2331                            _SEGMENT_ENTRY_GMAP_UC));
2332             if (purge)
2333                 __pmdp_csp(pmdp);
2334             set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
2335         }
2336         spin_unlock(&gmap->guest_table_lock);
2337     }
2338     rcu_read_unlock();
2339 }
2340 
2341 /**
2342  * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
2343  *                        flushing
2344  * @mm: pointer to the process mm_struct
2345  * @vmaddr: virtual address in the process address space
2346  */
2347 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
2348 {
2349     gmap_pmdp_clear(mm, vmaddr, 0);
2350 }
2351 EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
2352 
2353 /**
2354  * gmap_pmdp_csp - csp all affected guest pmd entries
2355  * @mm: pointer to the process mm_struct
2356  * @vmaddr: virtual address in the process address space
2357  */
2358 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
2359 {
2360     gmap_pmdp_clear(mm, vmaddr, 1);
2361 }
2362 EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
2363 
2364 /**
2365  * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
2366  * @mm: pointer to the process mm_struct
2367  * @vmaddr: virtual address in the process address space
2368  */
2369 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
2370 {
2371     unsigned long *entry, gaddr;
2372     struct gmap *gmap;
2373     pmd_t *pmdp;
2374 
2375     rcu_read_lock();
2376     list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2377         spin_lock(&gmap->guest_table_lock);
2378         entry = radix_tree_delete(&gmap->host_to_guest,
2379                       vmaddr >> PMD_SHIFT);
2380         if (entry) {
2381             pmdp = (pmd_t *)entry;
2382             gaddr = __gmap_segment_gaddr(entry);
2383             pmdp_notify_gmap(gmap, pmdp, gaddr);
2384             WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2385                        _SEGMENT_ENTRY_GMAP_UC));
2386             if (MACHINE_HAS_TLB_GUEST)
2387                 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2388                         gmap->asce, IDTE_LOCAL);
2389             else if (MACHINE_HAS_IDTE)
2390                 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
2391             *entry = _SEGMENT_ENTRY_EMPTY;
2392         }
2393         spin_unlock(&gmap->guest_table_lock);
2394     }
2395     rcu_read_unlock();
2396 }
2397 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
2398 
2399 /**
2400  * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
2401  * @mm: pointer to the process mm_struct
2402  * @vmaddr: virtual address in the process address space
2403  */
2404 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
2405 {
2406     unsigned long *entry, gaddr;
2407     struct gmap *gmap;
2408     pmd_t *pmdp;
2409 
2410     rcu_read_lock();
2411     list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2412         spin_lock(&gmap->guest_table_lock);
2413         entry = radix_tree_delete(&gmap->host_to_guest,
2414                       vmaddr >> PMD_SHIFT);
2415         if (entry) {
2416             pmdp = (pmd_t *)entry;
2417             gaddr = __gmap_segment_gaddr(entry);
2418             pmdp_notify_gmap(gmap, pmdp, gaddr);
2419             WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2420                        _SEGMENT_ENTRY_GMAP_UC));
2421             if (MACHINE_HAS_TLB_GUEST)
2422                 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2423                         gmap->asce, IDTE_GLOBAL);
2424             else if (MACHINE_HAS_IDTE)
2425                 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
2426             else
2427                 __pmdp_csp(pmdp);
2428             *entry = _SEGMENT_ENTRY_EMPTY;
2429         }
2430         spin_unlock(&gmap->guest_table_lock);
2431     }
2432     rcu_read_unlock();
2433 }
2434 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
2435 
2436 /**
2437  * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
2438  * @gmap: pointer to guest address space
2439  * @pmdp: pointer to the pmd to be tested
2440  * @gaddr: virtual address in the guest address space
2441  *
2442  * This function is assumed to be called with the guest_table_lock
2443  * held.
2444  */
2445 static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
2446                       unsigned long gaddr)
2447 {
2448     if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
2449         return false;
2450 
2451     /* Memory that is already protected and did not change is clean */
2452     if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
2453         !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
2454         return false;
2455 
2456     /* Clear UC indication and reset protection */
2457     set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC)));
2458     gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
2459     return true;
2460 }
2461 
2462 /**
2463  * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
2464  * @gmap: pointer to guest address space
2465  * @bitmap: dirty bitmap for this pmd
2466  * @gaddr: virtual address in the guest address space
2467  * @vmaddr: virtual address in the host address space
2468  *
2469  * This function is assumed to be called with the guest_table_lock
2470  * held.
2471  */
2472 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
2473                  unsigned long gaddr, unsigned long vmaddr)
2474 {
2475     int i;
2476     pmd_t *pmdp;
2477     pte_t *ptep;
2478     spinlock_t *ptl;
2479 
2480     pmdp = gmap_pmd_op_walk(gmap, gaddr);
2481     if (!pmdp)
2482         return;
2483 
2484     if (pmd_large(*pmdp)) {
2485         if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
2486             bitmap_fill(bitmap, _PAGE_ENTRIES);
2487     } else {
2488         for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
2489             ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
2490             if (!ptep)
2491                 continue;
2492             if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
2493                 set_bit(i, bitmap);
2494             spin_unlock(ptl);
2495         }
2496     }
2497     gmap_pmd_op_end(gmap, pmdp);
2498 }
2499 EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
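
/*
 * A dirty-log harvesting sketch (assumed caller; mark_dirty_sketch() is
 * hypothetical, locking is elided): one call covers a 1 MB segment and
 * fills one bit per 4 KB page into the 256-bit bitmap.
 */
static void sync_dirty_sketch(struct gmap *gmap, unsigned long gaddr,
                  unsigned long size)
{
    unsigned long bitmap[4];
    unsigned long vmaddr, end = gaddr + size;
    int i;

    for (; gaddr < end; gaddr += HPAGE_SIZE) {
        vmaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(vmaddr))
            continue;
        bitmap_zero(bitmap, _PAGE_ENTRIES);
        gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
        for_each_set_bit(i, bitmap, _PAGE_ENTRIES)
            mark_dirty_sketch(gaddr + i * PAGE_SIZE); /* hypothetical */
    }
}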
2500 
2501 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2502 static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
2503                     unsigned long end, struct mm_walk *walk)
2504 {
2505     struct vm_area_struct *vma = walk->vma;
2506 
2507     split_huge_pmd(vma, pmd, addr);
2508     return 0;
2509 }
2510 
2511 static const struct mm_walk_ops thp_split_walk_ops = {
2512     .pmd_entry  = thp_split_walk_pmd_entry,
2513 };
2514 
2515 static inline void thp_split_mm(struct mm_struct *mm)
2516 {
2517     struct vm_area_struct *vma;
2518 
2519     for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
2520         vma->vm_flags &= ~VM_HUGEPAGE;
2521         vma->vm_flags |= VM_NOHUGEPAGE;
2522         walk_page_vma(vma, &thp_split_walk_ops, NULL);
2523     }
2524     mm->def_flags |= VM_NOHUGEPAGE;
2525 }
2526 #else
2527 static inline void thp_split_mm(struct mm_struct *mm)
2528 {
2529 }
2530 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2531 
2532 /*
2533  * Remove all empty zero pages from the mapping for lazy refaulting
2534  * - This must be called after mm->context.has_pgste is set, to avoid
2535  *   future creation of zero pages
2536  * - This must be called after THP was enabled
2537  */
2538 static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
2539                unsigned long end, struct mm_walk *walk)
2540 {
2541     unsigned long addr;
2542 
2543     for (addr = start; addr != end; addr += PAGE_SIZE) {
2544         pte_t *ptep;
2545         spinlock_t *ptl;
2546 
2547         ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2548         if (is_zero_pfn(pte_pfn(*ptep)))
2549             ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
2550         pte_unmap_unlock(ptep, ptl);
2551     }
2552     return 0;
2553 }
2554 
2555 static const struct mm_walk_ops zap_zero_walk_ops = {
2556     .pmd_entry  = __zap_zero_pages,
2557 };
2558 
2559 /*
2560  * switch on pgstes for its userspace process (for kvm)
2561  */
2562 int s390_enable_sie(void)
2563 {
2564     struct mm_struct *mm = current->mm;
2565 
2566     /* Do we have pgstes? If yes, we are done */
2567     if (mm_has_pgste(mm))
2568         return 0;
2569     /* Fail if the page tables are 2K */
2570     if (!mm_alloc_pgste(mm))
2571         return -EINVAL;
2572     mmap_write_lock(mm);
2573     mm->context.has_pgste = 1;
2574     /* split thp mappings and disable thp for future mappings */
2575     thp_split_mm(mm);
2576     walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
2577     mmap_write_unlock(mm);
2578     return 0;
2579 }
2580 EXPORT_SYMBOL_GPL(s390_enable_sie);
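
/*
 * Typical initialization order, as a sketch (assumed KVM-like caller, not
 * part of the original file): pgstes must be enabled for the mm before any
 * guest mapping is created, so s390_enable_sie() comes first, then the gmap.
 */
static struct gmap *create_guest_space_sketch(unsigned long mem_limit)
{
    if (s390_enable_sie())      /* fails for mms with 2K page tables */
        return NULL;
    return gmap_create(current->mm, mem_limit);
}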
2581 
2582 int gmap_mark_unmergeable(void)
2583 {
2584     struct mm_struct *mm = current->mm;
2585     struct vm_area_struct *vma;
2586     int ret;
2587 
2588     for (vma = mm->mmap; vma; vma = vma->vm_next) {
2589         ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
2590                   MADV_UNMERGEABLE, &vma->vm_flags);
2591         if (ret)
2592             return ret;
2593     }
2594     mm->def_flags &= ~VM_MERGEABLE;
2595     return 0;
2596 }
2597 EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
2598 
2599 /*
2600  * Enable storage key handling from now on and initialize the storage
2601  * keys with the default key.
2602  */
2603 static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
2604                   unsigned long next, struct mm_walk *walk)
2605 {
2606     /* Clear storage key */
2607     ptep_zap_key(walk->mm, addr, pte);
2608     return 0;
2609 }
2610 
2611 /*
2612  * Give a chance to schedule after setting storage keys for 256 pages (one pmd).
2613  * We only hold the mm lock, which is a rwsem and the kvm srcu.
2614  * Both can sleep.
2615  */
2616 static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
2617                   unsigned long next, struct mm_walk *walk)
2618 {
2619     cond_resched();
2620     return 0;
2621 }
2622 
2623 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
2624                       unsigned long hmask, unsigned long next,
2625                       struct mm_walk *walk)
2626 {
2627     pmd_t *pmd = (pmd_t *)pte;
2628     unsigned long start, end;
2629     struct page *page = pmd_page(*pmd);
2630 
2631     /*
2632      * The write check makes sure we do not set a key on shared
2633      * memory. This is needed as the walker does not differentiate
2634      * between actual guest memory and the process executable or
2635      * shared libraries.
2636      */
2637     if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
2638         !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
2639         return 0;
2640 
2641     start = pmd_val(*pmd) & HPAGE_MASK;
2642     end = start + HPAGE_SIZE - 1;
2643     __storage_key_init_range(start, end);
2644     set_bit(PG_arch_1, &page->flags);
2645     cond_resched();
2646     return 0;
2647 }
2648 
2649 static const struct mm_walk_ops enable_skey_walk_ops = {
2650     .hugetlb_entry      = __s390_enable_skey_hugetlb,
2651     .pte_entry      = __s390_enable_skey_pte,
2652     .pmd_entry      = __s390_enable_skey_pmd,
2653 };
2654 
2655 int s390_enable_skey(void)
2656 {
2657     struct mm_struct *mm = current->mm;
2658     int rc = 0;
2659 
2660     mmap_write_lock(mm);
2661     if (mm_uses_skeys(mm))
2662         goto out_up;
2663 
2664     mm->context.uses_skeys = 1;
2665     rc = gmap_mark_unmergeable();
2666     if (rc) {
2667         mm->context.uses_skeys = 0;
2668         goto out_up;
2669     }
2670     walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
2671 
2672 out_up:
2673     mmap_write_unlock(mm);
2674     return rc;
2675 }
2676 EXPORT_SYMBOL_GPL(s390_enable_skey);
2677 
2678 /*
2679  * Reset CMMA state, make all pages stable again.
2680  */
2681 static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2682                  unsigned long next, struct mm_walk *walk)
2683 {
2684     ptep_zap_unused(walk->mm, addr, pte, 1);
2685     return 0;
2686 }
2687 
2688 static const struct mm_walk_ops reset_cmma_walk_ops = {
2689     .pte_entry      = __s390_reset_cmma,
2690 };
2691 
2692 void s390_reset_cmma(struct mm_struct *mm)
2693 {
2694     mmap_write_lock(mm);
2695     walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
2696     mmap_write_unlock(mm);
2697 }
2698 EXPORT_SYMBOL_GPL(s390_reset_cmma);
2699 
2700 #define GATHER_GET_PAGES 32
2701 
2702 struct reset_walk_state {
2703     unsigned long next;
2704     unsigned long count;
2705     unsigned long pfns[GATHER_GET_PAGES];
2706 };
2707 
2708 static int s390_gather_pages(pte_t *ptep, unsigned long addr,
2709                  unsigned long next, struct mm_walk *walk)
2710 {
2711     struct reset_walk_state *p = walk->private;
2712     pte_t pte = READ_ONCE(*ptep);
2713 
2714     if (pte_present(pte)) {
2715         /* we have a reference from the mapping, take an extra one */
2716         get_page(phys_to_page(pte_val(pte)));
2717         p->pfns[p->count] = phys_to_pfn(pte_val(pte));
2718         p->next = next;
2719         p->count++;
2720     }
2721     return p->count >= GATHER_GET_PAGES;
2722 }
2723 
2724 static const struct mm_walk_ops gather_pages_ops = {
2725     .pte_entry = s390_gather_pages,
2726 };
2727 
2728 /*
2729  * Call the Destroy secure page UVC on each page in the given array of PFNs.
2730  * Each page needs to have an extra reference, which will be released here.
2731  */
2732 void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)
2733 {
2734     unsigned long i;
2735 
2736     for (i = 0; i < count; i++) {
2737         /* we always have an extra reference */
2738         uv_destroy_owned_page(pfn_to_phys(pfns[i]));
2739         /* get rid of the extra reference */
2740         put_page(pfn_to_page(pfns[i]));
2741         cond_resched();
2742     }
2743 }
2744 EXPORT_SYMBOL_GPL(s390_uv_destroy_pfns);
2745 
2746 /**
2747  * __s390_uv_destroy_range - Call the destroy secure page UVC on each page
2748  * in the given range of the given address space.
2749  * @mm: the mm to operate on
2750  * @start: the start of the range
2751  * @end: the end of the range
2752  * @interruptible: if not 0, stop when a fatal signal is received
2753  *
2754  * Walk the given range of the given address space and call the destroy
2755  * secure page UVC on each page. Optionally exit early if a fatal signal is
2756  * pending.
2757  *
2758  * Return: 0 on success, -EINTR if the function stopped before completing
2759  */
2760 int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
2761                 unsigned long end, bool interruptible)
2762 {
2763     struct reset_walk_state state = { .next = start };
2764     int r = 1;
2765 
2766     while (r > 0) {
2767         state.count = 0;
2768         mmap_read_lock(mm);
2769         r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state);
2770         mmap_read_unlock(mm);
2771         cond_resched();
2772         s390_uv_destroy_pfns(state.count, state.pfns);
2773         if (interruptible && fatal_signal_pending(current))
2774             return -EINTR;
2775     }
2776     return 0;
2777 }
2778 EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
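
/*
 * A thin convenience wrapper, sketched (assumed helper, not part of the
 * original file): destroy all secure pages of an mm, stopping early when
 * the task has received a fatal signal.
 */
static int uv_destroy_mm_sketch(struct mm_struct *mm)
{
    return __s390_uv_destroy_range(mm, 0, TASK_SIZE, true);
}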
2779 
2780 /**
2781  * s390_unlist_old_asce - Remove the topmost level of page tables from the
2782  * list of page tables of the gmap.
2783  * @gmap: the gmap whose table is to be removed
2784  *
2785  * On s390x, KVM keeps a list of all pages containing the page tables of the
2786  * gmap (the CRST list). This list is used at tear down time to free all
2787  * pages that are now not needed anymore.
2788  *
2789  * This function removes the topmost page of the tree (the one pointed to by
2790  * the ASCE) from the CRST list.
2791  *
2792  * This means that it will not be freed when the VM is torn down, and needs
2793  * to be handled separately by the caller, unless a leak is actually
2794  * intended. Notice that this function will only remove the page from the
2795  * list, the page will still be used as a top level page table (and ASCE).
2796  */
2797 void s390_unlist_old_asce(struct gmap *gmap)
2798 {
2799     struct page *old;
2800 
2801     old = virt_to_page(gmap->table);
2802     spin_lock(&gmap->guest_table_lock);
2803     list_del(&old->lru);
2804     /*
2805      * Sometimes the topmost page might need to be "removed" multiple
2806      * times, for example if the VM is rebooted into secure mode several
2807      * times concurrently, or if s390_replace_asce fails after calling
2808      * s390_unlist_old_asce and is attempted again later. In that case
2809      * the old asce has been removed from the list, and therefore it
2810      * will not be freed when the VM terminates, but the ASCE is still
2811      * in use and still pointed to.
2812      * A subsequent call to replace_asce will follow the pointer and try
2813      * to remove the same page from the list again.
2814      * Therefore it's necessary that the page of the ASCE has valid
2815      * pointers, so list_del can work (and do nothing) without
2816      * dereferencing stale or invalid pointers.
2817      */
2818     INIT_LIST_HEAD(&old->lru);
2819     spin_unlock(&gmap->guest_table_lock);
2820 }
2821 EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
2822 
2823 /**
2824  * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
2825  * @gmap: the gmap whose ASCE needs to be replaced
2826  *
2827  * If the allocation of the new top level page table fails, the ASCE is not
2828  * replaced.
2829  * In any case, the old ASCE is always removed from the gmap CRST list.
2830  * Therefore the caller has to make sure to save a pointer to it
2831  * beforehand, unless a leak is actually intended.
2832  */
2833 int s390_replace_asce(struct gmap *gmap)
2834 {
2835     unsigned long asce;
2836     struct page *page;
2837     void *table;
2838 
2839     s390_unlist_old_asce(gmap);
2840 
2841     page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
2842     if (!page)
2843         return -ENOMEM;
2844     table = page_to_virt(page);
2845     memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
2846 
2847     /*
2848      * The caller has to deal with the old ASCE, but here we make sure
2849      * the new one is properly added to the CRST list, so that
2850      * it will be freed when the VM is torn down.
2851      */
2852     spin_lock(&gmap->guest_table_lock);
2853     list_add(&page->lru, &gmap->crst_list);
2854     spin_unlock(&gmap->guest_table_lock);
2855 
2856     /* Set new table origin while preserving existing ASCE control bits */
2857     asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
2858     WRITE_ONCE(gmap->asce, asce);
2859     WRITE_ONCE(gmap->mm->context.gmap_asce, asce);
2860     WRITE_ONCE(gmap->table, table);
2861 
2862     return 0;
2863 }
2864 EXPORT_SYMBOL_GPL(s390_replace_asce);
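
/*
 * Sketch of the intended replacement flow (assumed caller): remember the
 * old top-level page before replacing, because after s390_replace_asce()
 * it is no longer on the CRST list and must be freed by the caller once
 * it is guaranteed to be unused.
 */
static int replace_asce_sketch(struct gmap *gmap, struct page **old_asce_page)
{
    struct page *old = virt_to_page(gmap->table);
    int rc;

    rc = s390_replace_asce(gmap);
    if (!rc)
        *old_asce_page = old;   /* later: __free_pages(old, CRST_ALLOC_ORDER) */
    return rc;
}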