/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

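/* Without an MMU there is no TLB, so flushing is a no-op. */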
#define tlb_flush(tlb)  ((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/tlbflush.h>

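/*
 * Callback used by the generic mmu_gather code to actually free a
 * page table page once it is safe to do so.
 */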
static inline void __tlb_remove_table(void *_table)
{
    free_page_and_swap_cache((struct page *)_table);
}

#include <asm-generic/tlb.h>

static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
    pgtable_pte_page_dtor(pte);

#ifndef CONFIG_ARM_LPAE
    /*
     * With the classic ARM MMU, a pte page has two corresponding pmd
     * entries, each covering 1MB.  Widen the gather range to a
     * two-page window straddling the 1MB boundary so the flush
     * covers an address in both of them.
     */
    addr = (addr & PMD_MASK) + SZ_1M;
    __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif

    tlb_remove_table(tlb, pte);
}

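/*
 * Only LPAE (3-level) page tables have a separate pmd level; with the
 * classic 2-level MMU the pmd is folded into the pgd, so there is
 * nothing to free here.
 */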
static inline void
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
    struct page *page = virt_to_page(pmdp);

    pgtable_pmd_page_dtor(page);
    tlb_remove_table(tlb, page);
#endif
}

#endif /* CONFIG_MMU */
#endif