// SPDX-License-Identifier: GPL-2.0
/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

unsigned int hpage_shift;
EXPORT_SYMBOL(hpage_shift);

#ifdef CONFIG_PPC_64S_HASH_MMU
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize)
{
	real_pte_t rpte;
	unsigned long vpn;
	unsigned long old_pte, new_pte;
	unsigned long rflags, pa;
	long slot, offset;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Compute the virtual page number from the EA, VSID and segment size */
	vpn = hpt_vpn(ea, vsid, ssize);

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *    the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *    current values of the pp bits in the HPTE prevent access
	 *    because we are doing software DIRTY bit management and the
	 *    page is currently not DIRTY.
	 */

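	/*
	 * Lock the Linux PTE by atomically setting H_PAGE_BUSY. If it is
	 * already busy another thread is updating it, so bail out and let
	 * the access be retried. The cmpxchg loop handles the PTE changing
	 * under us between the read and the update.
	 */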
	do {
		old_pte = pte_val(*ptep);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;

		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/* Make sure this is a hugetlb entry, not a THP or devmap mapping */
	if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
		return 0;

	rflags = htab_convert_pte_flags(new_pte, flags);
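	/*
	 * 16G hugepages sit at the PUD level of the page table, everything
	 * else handled here sits at the PMD level, so the offset to the
	 * region where the hash slot index is cached differs.
	 */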
	if (unlikely(mmu_psize == MMU_PAGE_16G))
		offset = PTRS_PER_PUD;
	else
		offset = PTRS_PER_PMD;
	rpte = __real_pte(__pte(old_pte), ptep, offset);

	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/*
		 * No CPU supports hugepages yet lacks the no-execute
		 * feature, so we don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long gslot;

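		/*
		 * Recover the global hash slot recorded in the PTE and try
		 * to just update the protection bits of the existing HPTE.
		 * If that fails, the HPTE has been evicted, so forget the
		 * stale slot and fall through to insert a fresh one.
		 */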
		gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0);
		if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, mmu_psize,
					       mmu_psize, ssize, flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & H_PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, shift, ssize);

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

		/* Clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;

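		/*
		 * Insert the HPTE, trying the primary and then the
		 * secondary hash group, evicting an existing entry if both
		 * groups are full.
		 */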
		slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
					     mmu_psize, ssize);

		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   mmu_psize, mmu_psize, old_pte);
			return -1;
		}

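		/*
		 * Record the hash slot number in the PTE so that later
		 * updates and invalidations can find the HPTE without
		 * searching the hash table.
		 */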
		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
	}

	/*
	 * No need to use ldarx/stdcx here: we still hold H_PAGE_BUSY, so a
	 * plain store that clears the busy bit both publishes the new PTE
	 * and releases the lock.
	 */
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}
#endif

pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
{
	unsigned long pte_val;
	/*
	 * Clear _PAGE_PRESENT so that no parallel hardware update of the
	 * PTE is possible, but set _PAGE_INVALID at the same time so that
	 * pte_present() stays true and we don't take a spurious fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep,
			     _PAGE_PRESENT, _PAGE_INVALID, 1);

	return __pte(pte_val);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
				  pte_t *ptep, pte_t old_pte, pte_t pte)
{
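	/*
	 * Radix may need to flush the old translation before installing a
	 * PTE with more relaxed permissions; on hash, simply setting the
	 * huge PTE is enough.
	 */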
	if (radix_enabled())
		return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
							   old_pte, pte);
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}

void __init hugetlbpage_init_defaultsize(void)
{
	/*
	 * Set default large page size. Currently, we pick 16M, 1M or 2M
	 * depending on what is available.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		hpage_shift = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		hpage_shift = mmu_psize_defs[MMU_PAGE_1M].shift;
	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
		hpage_shift = mmu_psize_defs[MMU_PAGE_2M].shift;
}