/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/cachetype.h>
#include <asm/fixmap.h>

#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
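
/*
 * Worked example (a sketch, assuming the common 3G/1G split with
 * PAGE_OFFSET == 0xC0000000, PMD_SIZE == 2MiB and 4KiB pages):
 * PKMAP_BASE is then 0xBFE00000, so for virt == 0xBFE03000 we get
 * PKMAP_NR(virt) == 3, and PKMAP_ADDR(3) == 0xBFE03000 again, i.e.
 * the two macros round-trip.  LAST_PKMAP == PTRS_PER_PTE means the
 * pkmap window spans exactly the one PMD carved out below PAGE_OFFSET.
 */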

#define flush_cache_kmaps() \
	do { \
		if (cache_is_vivt()) \
			flush_cache_all(); \
	} while (0)
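
/*
 * flush_cache_kmaps() above only needs to act on VIVT caches: there the
 * cache lines are indexed and tagged by the virtual addresses about to
 * be recycled, so they must be written back and invalidated before the
 * pkmap entries are torn down.  VIPT/PIPT caches need nothing here.
 */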

extern pte_t *pkmap_page_table;

/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
 * existing virtual mapping in an atomic context.  With a VIVT cache this
 * is essential to do, but with a VIPT cache this is only an optimization
 * so as not to pay the price of establishing a second mapping if an
 * existing one can be used.  However, on platforms without hardware TLB
 * maintenance broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET
 * at all, since the locking involved must also disable IRQs, which is
 * incompatible with the IPI mechanism used by global TLB operations.
 */
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The selected kernel config options cannot be supported together"
#endif
#endif
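
/*
 * Caller-side sketch (an illustration, not part of this header; the
 * pattern is modeled on the ARM DMA cache maintenance code): try to
 * reuse an existing kmap first, and fall back to a temporary atomic
 * mapping.  kmap_high_get() pins the page, so a non-NULL result must
 * be balanced with kunmap_high():
 *
 *	void *vaddr = kmap_high_get(page);
 *	if (vaddr) {
 *		memcpy(vaddr, buf, PAGE_SIZE);	// hypothetical payload
 *		kunmap_high(page);
 *	} else {
 *		vaddr = kmap_atomic(page);
 *		memcpy(vaddr, buf, PAGE_SIZE);
 *		kunmap_atomic(vaddr);
 *	}
 */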

/*
 * The erratum 798181 workaround broadcasts TLB invalidation by IPI,
 * which is incompatible with the IRQ-disabled locking used by
 * kmap_high_get(), so it must be disabled here as well.
 */
#ifdef CONFIG_ARM_ERRATA_798181
#undef ARCH_NEEDS_KMAP_HIGH_GET
#endif

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
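
/*
 * On a non-VIVT cache reusing an existing kmap is only an optimization,
 * so with CONFIG_DEBUG_HIGHMEM decline it here and force the generic
 * kmap_local code to exercise its regular fixmap path instead.
 */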
static inline void *arch_kmap_local_high_get(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
		return NULL;
	return kmap_high_get(page);
}
#define arch_kmap_local_high_get arch_kmap_local_high_get

#else /* ARCH_NEEDS_KMAP_HIGH_GET */
static inline void *kmap_high_get(struct page *page)
{
	return NULL;
}
#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
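
/*
 * Hooks for the generic kmap_local code: flush the local TLB entry once
 * a mapping is installed, write back the VIVT dcache lines covering the
 * window before a mapping is torn down, and flush the TLB entry again
 * afterwards.
 */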

#define arch_kmap_local_post_map(vaddr, pteval)				\
	local_flush_tlb_kernel_page(vaddr)

#define arch_kmap_local_pre_unmap(vaddr)				\
do {									\
	if (cache_is_vivt())						\
		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);	\
} while (0)

#define arch_kmap_local_post_unmap(vaddr)				\
	local_flush_tlb_kernel_page(vaddr)

#endif