/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H

#include <asm-generic/pgtable-nop4d.h>

/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD level use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  9
#define PGD_INDEX_SIZE  9

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  (sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PUD_TABLE_SIZE  (sizeof(pud_t) << PUD_INDEX_SIZE)
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif  /* __ASSEMBLY__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD    (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD    (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
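
/*
 * Worked example (not part of the original header; assumes the usual 4K
 * base page, i.e. PAGE_SHIFT == 12, and an 8-byte pte_t):
 *
 *   PTRS_PER_PTE = 1 << 9 = 512  -> a PTE table is 512 * 8B = 4KB, one page
 *   PTRS_PER_PMD = 1 << 7 = 128
 *   PTRS_PER_PUD = 1 << 9 = 512
 *   PTRS_PER_PGD = 1 << 9 = 512
 *
 * Total virtual address bits covered by the four-level walk:
 *   12 + 9 + 7 + 9 + 9 = 46 bits, i.e. a 64TB virtual address space.
 */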

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT   (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE    (1UL << PMD_SHIFT)
#define PMD_MASK    (~(PMD_SIZE-1))

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT   (PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE    (1UL << PUD_SHIFT)
#define PUD_MASK    (~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_MASK  (~(PGDIR_SIZE-1))
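
/*
 * Worked values (not part of the original header; assumes PAGE_SHIFT == 12):
 *   PMD_SHIFT   = 12 + 9 = 21 -> each PMD entry maps   2MB (PMD_SIZE)
 *   PUD_SHIFT   = 21 + 7 = 28 -> each PUD entry maps 256MB (PUD_SIZE)
 *   PGDIR_SHIFT = 28 + 9 = 37 -> each PGD entry maps 128GB (PGDIR_SIZE)
 * The *_MASK values round an address down to the corresponding boundary.
 */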

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS     0
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS     0
/* Bits to mask out from a P4D to get to the PUD page */
#define P4D_MASKED_BITS     0
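
/*
 * Illustrative note (not in the original header): all three masks are zero
 * on this layout, so an upper-level entry holds nothing but the address of
 * the next-level table; e.g. p4d_val(p4d) & ~P4D_MASKED_BITS below reduces
 * to p4d_val(p4d).
 */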


/*
 * 4-level page tables related bits
 */

#define p4d_none(p4d)       (!p4d_val(p4d))
#define p4d_bad(p4d)        (p4d_val(p4d) == 0)
#define p4d_present(p4d)    (p4d_val(p4d) != 0)

#ifndef __ASSEMBLY__

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
    return (pud_t *) (p4d_val(p4d) & ~P4D_MASKED_BITS);
}

static inline void p4d_clear(p4d_t *p4dp)
{
    *p4dp = __p4d(0);
}

static inline pte_t p4d_pte(p4d_t p4d)
{
    return __pte(p4d_val(p4d));
}

static inline p4d_t pte_p4d(pte_t pte)
{
    return __p4d(pte_val(pte));
}
extern struct page *p4d_page(p4d_t p4d);
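
/*
 * Illustrative sketch (not part of the original header, never compiled):
 * how a generic walker could use the helpers above to step from a p4d
 * entry to the pud entry covering an address.  example_pud_offset is a
 * hypothetical name, not a kernel API.
 */
#if 0
static inline pud_t *example_pud_offset(p4d_t *p4dp, unsigned long addr)
{
    if (p4d_none(*p4dp))    /* no pud table attached to this entry */
        return NULL;
    /* P4D_MASKED_BITS is 0 here, so this is just a cast of the value */
    return p4d_pgtable(*p4dp) + ((addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1));
}
#endif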

#endif /* !__ASSEMBLY__ */

#define pud_ERROR(e) \
    pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))

/*
 * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
 */
#define remap_4k_pfn(vma, addr, pfn, prot)  \
    remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
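
/*
 * Illustrative use (not part of the original header): a hypothetical
 * driver mmap handler mapping a single 4K physical frame into userspace.
 * foo_mmap and foo_pfn are made-up names for the example.
 *
 *    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *    {
 *        return remap_4k_pfn(vma, vma->vm_start, foo_pfn,
 *                            vma->vm_page_prot);
 *    }
 */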

#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */