// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFN
 */
#define TLB_ENTRY_SIZE      (PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1)

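/*
 * Invalidate the entire TLB, for every address space.
 */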
void flush_tlb_all(void)
{
    tlb_invalid_all();
}

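/*
 * Invalidate all TLB entries belonging to one mm. With hardware TLB
 * invalidate support (CONFIG_CPU_HAS_TLBI), tlbi.asids drops every
 * entry tagged with the mm's ASID; otherwise fall back to flushing
 * the whole TLB.
 */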
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
    sync_is();
    asm volatile(
        "tlbi.asids %0  \n"
        "sync.i     \n"
        :
        : "r" (cpu_asid(mm))
        : "memory");
#else
    tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB; changing the ASID field is what invalidates the I-uTLB and
 * D-uTLB. Writing a temporary ASID (oldpid + 1) first guarantees an
 * ASID change even when oldpid == newpid.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
    if (oldpid == newpid) \
        write_mmu_entryhi(oldpid + 1); \
    write_mmu_entryhi(oldpid); \
} while (0)
#endif

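/*
 * Invalidate the TLB entries covering a user virtual address range in
 * vma's address space. The range is rounded out to whole TLB entries
 * and walked in TLB_ENTRY_SIZE steps, since each entry maps two pages.
 */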
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
            unsigned long end)
{
    unsigned long newpid = cpu_asid(vma->vm_mm);

    /* Round the range out to whole TLB entries (two pages each). */
    start &= TLB_ENTRY_SIZE_MASK;
    end   += TLB_ENTRY_SIZE - 1;
    end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
    sync_is();
    while (start < end) {
        asm volatile(
            "tlbi.vas %0    \n"
            :
            : "r" (start | newpid)
            : "memory");

        start += TLB_ENTRY_SIZE;
    }
    asm volatile("sync.i\n");
#else
    {
    unsigned long flags, oldpid;

    local_irq_save(flags);
    oldpid = read_mmu_entryhi() & ASID_MASK;
    while (start < end) {
        int idx;

        /* Probe for a matching entry and invalidate it by index. */
        write_mmu_entryhi(start | newpid);
        start += TLB_ENTRY_SIZE;
        tlb_probe();
        idx = read_mmu_index();
        if (idx >= 0)
            tlb_invalid_indexed();
    }
    restore_asid_inv_utlb(oldpid, newpid);
    local_irq_restore(flags);
    }
#endif
}

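/*
 * Invalidate the TLB entries covering a kernel virtual address range.
 * tlbi.vaas invalidates by address alone, with no ASID match; the
 * fallback probes with whatever ASID is currently in entryhi.
 */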
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
    start &= TLB_ENTRY_SIZE_MASK;
    end   += TLB_ENTRY_SIZE - 1;
    end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
    sync_is();
    while (start < end) {
        asm volatile(
            "tlbi.vaas %0   \n"
            :
            : "r" (start)
            : "memory");

        start += TLB_ENTRY_SIZE;
    }
    asm volatile("sync.i\n");
#else
    {
    unsigned long flags, oldpid;

    local_irq_save(flags);
    oldpid = read_mmu_entryhi() & ASID_MASK;
    while (start < end) {
        int idx;

        write_mmu_entryhi(start | oldpid);
        start += TLB_ENTRY_SIZE;
        tlb_probe();
        idx = read_mmu_index();
        if (idx >= 0)
            tlb_invalid_indexed();
    }
    restore_asid_inv_utlb(oldpid, oldpid);
    local_irq_restore(flags);
    }
#endif
}

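/*
 * Invalidate the single TLB entry mapping addr in vma's address space.
 */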
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
    int newpid = cpu_asid(vma->vm_mm);

    addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
    sync_is();
    asm volatile(
        "tlbi.vas %0    \n"
        "sync.i     \n"
        :
        : "r" (addr | newpid)
        : "memory");
#else
    {
    int oldpid, idx;
    unsigned long flags;

    local_irq_save(flags);
    oldpid = read_mmu_entryhi() & ASID_MASK;
    write_mmu_entryhi(addr | newpid);
    tlb_probe();
    idx = read_mmu_index();
    if (idx >= 0)
        tlb_invalid_indexed();

    restore_asid_inv_utlb(oldpid, newpid);
    local_irq_restore(flags);
    }
#endif
}

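/*
 * Invalidate the single TLB entry mapping the kernel address addr,
 * regardless of ASID.
 */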
void flush_tlb_one(unsigned long addr)
{
    addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
    sync_is();
    asm volatile(
        "tlbi.vaas %0   \n"
        "sync.i     \n"
        :
        : "r" (addr)
        : "memory");
#else
    {
    int oldpid, idx;
    unsigned long flags;

    local_irq_save(flags);
    oldpid = read_mmu_entryhi() & ASID_MASK;
    write_mmu_entryhi(addr | oldpid);
    tlb_probe();
    idx = read_mmu_index();
    if (idx >= 0)
        tlb_invalid_indexed();

    restore_asid_inv_utlb(oldpid, oldpid);
    local_irq_restore(flags);
    }
#endif
}
EXPORT_SYMBOL(flush_tlb_one);