// SPDX-License-Identifier: GPL-2.0
/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/mmu.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>

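/*
 * On 64-bit Book3E (e.g. e6500), TLB1 is shared by the threads of a
 * core, so the next victim slot is tracked in the per-core
 * tlb_core_data. Hand out entry-select (ESEL) values round-robin
 * within [esel_first, esel_max).
 */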
static inline int tlb1_next(void)
{
    struct paca_struct *paca = get_paca();
    struct tlb_core_data *tcd;
    int this, next;

    tcd = paca->tcd_ptr;
    this = tcd->esel_next;

    next = this + 1;
    if (next >= tcd->esel_max)
        next = tcd->esel_first;

    tcd->esel_next = next;
    return this;
}

static inline void book3e_tlb_lock(void)
{
    struct paca_struct *paca = get_paca();
    unsigned long tmp;
    int token = smp_processor_id() + 1;

    /*
     * Besides being unnecessary in the absence of SMT, this
     * check prevents trying to do lbarx/stbcx. on e5500 which
     * doesn't implement either feature.
     */
    if (!cpu_has_feature(CPU_FTR_SMT))
        return;

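    /*
     * Take the per-core TLB lock: claim the lock byte with
     * lbarx/stbcx. using a non-zero per-cpu token
     * (smp_processor_id() + 1). While the byte is held by another
     * thread, the 2: loop polls it with a plain lbzx so we don't
     * hammer the reservation, then retries at 1:.
     */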
    asm volatile("1: lbarx %0, 0, %1;"
             "cmpwi %0, 0;"
             "bne 2f;"
             "stbcx. %2, 0, %1;"
             "bne 1b;"
             "b 3f;"
             "2: lbzx %0, 0, %1;"
             "cmpwi %0, 0;"
             "bne 2b;"
             "b 1b;"
             "3:"
             : "=&r" (tmp)
             : "r" (&paca->tcd_ptr->lock), "r" (token)
             : "memory");
}

static inline void book3e_tlb_unlock(void)
{
    struct paca_struct *paca = get_paca();

    if (!cpu_has_feature(CPU_FTR_SMT))
        return;

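    /*
     * isync is context-synchronizing: it forces the preceding tlbwe
     * to complete before the store below releases the lock.
     */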
    isync();
    paca->tcd_ptr->lock = 0;
}
#else
static inline int tlb1_next(void)
{
    int index, ncams;

    ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

    index = this_cpu_read(next_tlbcam_idx);

    /* Just round-robin the entries and wrap when we hit the end */
    if (unlikely(index == ncams - 1))
        __this_cpu_write(next_tlbcam_idx, tlbcam_index);
    else
        __this_cpu_inc(next_tlbcam_idx);

    return index;
}

static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
#endif

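/*
 * Return non-zero if a TLB entry already translates @ea under @pid:
 * with the search PID loaded into MAS6, tlbsx looks up @ea, and the
 * valid bit (the MSB) of MAS1 (SPR 0x271) reports whether it hit.
 */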
static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
    int found = 0;

    mtspr(SPRN_MAS6, pid << 16);
    asm volatile(
        "tlbsx  0,%1\n"
        "mfspr  %0,0x271\n"
        "srwi   %0,%0,31\n"
        : "=&r"(found) : "r"(ea));

    return found;
}

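/*
 * Preload a TLB1 entry for a user huge page, so the access that just
 * faulted does not miss again as soon as the fault returns.
 */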
static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
    unsigned long mas1, mas2;
    u64 mas7_3;
    unsigned long psize, tsize, shift;
    unsigned long flags;
    struct mm_struct *mm;
    int index;

    if (unlikely(is_kernel_addr(ea)))
        return;

    mm = vma->vm_mm;

    psize = vma_mmu_pagesize(vma);
    shift = __ilog2(psize);
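    /* MAS1[TSIZE] encodes the page size as 2^TSIZE KiB, hence shift - 10. */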
    tsize = shift - 10;
    /*
     * We can't be interrupted while we're setting up the MAS
     * registers or after we've confirmed that no tlb exists.
     */
    local_irq_save(flags);

    book3e_tlb_lock();

    if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
        book3e_tlb_unlock();
        local_irq_restore(flags);
        return;
    }

    /* We have to use the CAM(TLB1) on FSL parts for hugepages */
    index = tlb1_next();
    mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));

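    /*
     * Build the entry: MAS1 carries the valid bit, PID and size;
     * MAS2 the effective page number plus WIMGE attributes; MAS7_3
     * the real page number plus permission bits. Write permission is
     * stripped from clean pages so the first store faults and lets
     * the kernel mark the PTE dirty.
     */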
    mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
    mas2 = ea & ~((1UL << shift) - 1);
    mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
    mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
    mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
    if (!pte_dirty(pte))
        mas7_3 &= ~(MAS3_SW|MAS3_UW);

    mtspr(SPRN_MAS1, mas1);
    mtspr(SPRN_MAS2, mas2);

    if (mmu_has_feature(MMU_FTR_BIG_PHYS))
        mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
    mtspr(SPRN_MAS3, lower_32_bits(mas7_3));

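    /* Commit the entry described by the MAS registers to TLB1. */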
    asm volatile ("tlbwe");

    book3e_tlb_unlock();
    local_irq_restore(flags);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
    if (is_vm_hugetlb_page(vma))
        book3e_hugetlb_preload(vma, address, *ptep);
}

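/*
 * Invalidate the TLB entry covering the huge page at @vmaddr; the
 * page size comes from the hstate and is converted to the same
 * 2^TSIZE KiB encoding used above.
 */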
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
    struct hstate *hstate = hstate_file(vma->vm_file);
    unsigned long tsize = huge_page_shift(hstate) - 10;

    __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}