/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>


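/* Invalidate every entry in the auto-refill ways of the instruction TLB
 * (and, in __flush_dtlb_all below, of the data TLB). The entry argument
 * encodes the way number in its low bits and the entry index starting at
 * PAGE_SHIFT; the isync is deferred until all entries have been invalidated.
 */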
static inline void __flush_itlb_all (void)
{
    int w, i;

    for (w = 0; w < ITLB_ARF_WAYS; w++) {
        for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
            int e = w + (i << PAGE_SHIFT);
            invalidate_itlb_entry_no_isync(e);
        }
    }
    asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
    int w, i;

    for (w = 0; w < DTLB_ARF_WAYS; w++) {
        for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
            int e = w + (i << PAGE_SHIFT);
            invalidate_dtlb_entry_no_isync(e);
        }
    }
    asm volatile ("isync\n");
}


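/* Flush the entire instruction and data TLBs on the local CPU. */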
void local_flush_tlb_all(void)
{
    __flush_itlb_all();
    __flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm is someone else's user mapping,
 * we invalidate the context, so that when that user mapping is swapped in,
 * a new context will be assigned to it.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
    int cpu = smp_processor_id();

    if (mm == current->active_mm) {
        unsigned long flags;
        local_irq_save(flags);
        mm->context.asid[cpu] = NO_CONTEXT;
        activate_context(mm, cpu);
        local_irq_restore(flags);
    } else {
        mm->context.asid[cpu] = NO_CONTEXT;
        mm->context.cpu = -1;
    }
}


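/* Number of auto-refill entries in each TLB; _TLB_ENTRIES is the larger of
 * the two and serves as the cut-off between per-page invalidation and a
 * full flush in the range-flush routines below.
 */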
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

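/* Flush the TLB entries for the range [start, end) of the given VMA.
 * If the range covers no more pages than the TLB can hold, temporarily
 * program the mm's ASID into the RASID register and invalidate the pages
 * one by one (the ITLB is only touched for executable mappings); otherwise
 * just flush the whole mm.
 */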
void local_flush_tlb_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
    int cpu = smp_processor_id();
    struct mm_struct *mm = vma->vm_mm;
    unsigned long flags;

    if (mm->context.asid[cpu] == NO_CONTEXT)
        return;

#if 0
    printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
            (unsigned long)mm->context.asid[cpu], start, end);
#endif
    local_irq_save(flags);

    if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
        int oldpid = get_rasid_register();

        set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
        start &= PAGE_MASK;
        if (vma->vm_flags & VM_EXEC)
            while(start < end) {
                invalidate_itlb_mapping(start);
                invalidate_dtlb_mapping(start);
                start += PAGE_SIZE;
            }
        else
            while(start < end) {
                invalidate_dtlb_mapping(start);
                start += PAGE_SIZE;
            }

        set_rasid_register(oldpid);
    } else {
        local_flush_tlb_mm(mm);
    }
    local_irq_restore(flags);
}

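/* Flush the TLB entries for a single page of the given VMA, again by
 * temporarily switching the RASID register to the mm's ASID. The ITLB is
 * only touched for executable mappings.
 */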
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
    int cpu = smp_processor_id();
    struct mm_struct* mm = vma->vm_mm;
    unsigned long flags;
    int oldpid;

    if (mm->context.asid[cpu] == NO_CONTEXT)
        return;

    local_irq_save(flags);

    oldpid = get_rasid_register();
    set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

    if (vma->vm_flags & VM_EXEC)
        invalidate_itlb_mapping(page);
    invalidate_dtlb_mapping(page);

    set_rasid_register(oldpid);

    local_irq_restore(flags);
}

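/* Flush kernel mappings in the range [start, end). Ranges that lie entirely
 * between TASK_SIZE and PAGE_OFFSET and fit in the TLB are invalidated page
 * by page; anything else falls back to a full flush.
 */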
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
    if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
        end - start < _TLB_ENTRIES << PAGE_SHIFT) {
        start &= PAGE_MASK;
        while (start < end) {
            invalidate_itlb_mapping(start);
            invalidate_dtlb_mapping(start);
            start += PAGE_SIZE;
        }
    } else {
        local_flush_tlb_all();
    }
}

#ifdef CONFIG_DEBUG_TLB_SANITY

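/* Walk the page tables of the current task's mm (or active_mm for kernel
 * threads) and return the raw PTE value for vaddr, or 0 if nothing is
 * mapped there.
 */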
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
    struct task_struct *task = get_current();
    struct mm_struct *mm = task->mm;
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *pte;

    if (!mm)
        mm = task->active_mm;
    pgd = pgd_offset(mm, vaddr);
    if (pgd_none_or_clear_bad(pgd))
        return 0;
    pmd = pmd_offset(pgd, vaddr);
    if (pmd_none_or_clear_bad(pmd))
        return 0;
    pte = pte_offset_map(pmd, vaddr);
    if (!pte)
        return 0;
    return pte_val(*pte);
}

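/* Severity flags returned by check_tlb_entry(): TLB_INSANE triggers a BUG,
 * TLB_SUSPICIOUS only a warning.
 */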
enum {
    TLB_SUSPICIOUS  = 1,
    TLB_INSANE  = 2,
};

static void tlb_insane(void)
{
    BUG_ON(1);
}

static void tlb_suspicious(void)
{
    WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or the PTE
 * is marked as non-present. A non-present PTE together with a page that has a
 * non-zero refcount and zero mapcount is normal for a batched TLB flush
 * operation. Zero refcount means that the page was freed prematurely.
 * Non-zero mapcount is unusual, but does not necessarily mean an error, and
 * is therefore only marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
    unsigned tlbidx = w | (e << PAGE_SHIFT);
    unsigned r0 = dtlb ?
        read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
    unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
    unsigned pte = get_pte_for_vaddr(vpn);
    unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
    unsigned tlb_asid = r0 & ASID_MASK;
    bool kernel = tlb_asid == 1;
    int rc = 0;

    if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
        pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
                dtlb ? 'D' : 'I', w, e, vpn,
                kernel ? "kernel" : "user");
        rc |= TLB_INSANE;
    }

    if (tlb_asid == mm_asid) {
        unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
            read_itlb_translation(tlbidx);
        if ((pte ^ r1) & PAGE_MASK) {
            pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
                    dtlb ? 'D' : 'I', w, e, r0, r1, pte);
            if (pte == 0 || !pte_present(__pte(pte))) {
                struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
                pr_err("page refcount: %d, mapcount: %d\n",
                        page_count(p),
                        page_mapcount(p));
                if (!page_count(p))
                    rc |= TLB_INSANE;
                else if (page_mapcount(p))
                    rc |= TLB_SUSPICIOUS;
            } else {
                rc |= TLB_INSANE;
            }
        }
    }
    return rc;
}

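/* Scan every way and entry of both TLBs with interrupts disabled; BUG if any
 * entry is insane and WARN if any is merely suspicious.
 */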
void check_tlb_sanity(void)
{
    unsigned long flags;
    unsigned w, e;
    int bug = 0;

    local_irq_save(flags);
    for (w = 0; w < DTLB_ARF_WAYS; ++w)
        for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
            bug |= check_tlb_entry(w, e, true);
    for (w = 0; w < ITLB_ARF_WAYS; ++w)
        for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
            bug |= check_tlb_entry(w, e, false);
    if (bug & TLB_INSANE)
        tlb_insane();
    if (bug & TLB_SUSPICIOUS)
        tlb_suspicious();
    local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */