/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

#include <asm/asm-const.h>

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

#define H_PTRS_PER_PTE		(1 << H_PTE_INDEX_SIZE)
#define H_PTRS_PER_PMD		(1 << H_PMD_INDEX_SIZE)
#define H_PTRS_PER_PUD		(1 << H_PUD_INDEX_SIZE)

/* Bit to set in a PMD/PUD/PGD entry to mark it valid */
#define HASH_PMD_VAL_BITS	(0x8000000000000000UL)
#define HASH_PUD_VAL_BITS	(0x8000000000000000UL)
#define HASH_PGD_VAL_BITS	(0x8000000000000000UL)

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
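
/*
 * Worked example (a sketch; the index sizes below are the 64K-page values
 * from hash-64k.h at the time of writing and may differ between kernels):
 * with H_PTE_INDEX_SIZE = 8, H_PMD_INDEX_SIZE = 10, H_PUD_INDEX_SIZE = 10,
 * H_PGD_INDEX_SIZE = 8 and PAGE_SHIFT = 16, H_PGTABLE_EADDR_SIZE is
 * 8 + 10 + 10 + 8 + 16 = 52, so H_PGTABLE_RANGE = 1UL << 52 = 4PB.
 */
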
/*
 * The top 2 bits are ignored in the page table walk.
 */
#define EA_MASK			(~(0xcUL << 60))

/*
 * We store the slot details in the second half of the page table.
 * Increase the PUD-level table size so that hugetlb PTEs can be stored
 * at the PUD level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif
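
/*
 * Illustration (hypothetical numbers): with H_PUD_INDEX_SIZE = 10 on a
 * 64K-page hugetlb kernel, H_PUD_CACHE_INDEX is 11, so the PUD cache
 * allocates a table twice the walked size; the lower half is the page
 * table proper and the upper half holds the hash slot details.
 */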

/*
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel virtual map end (0xc00e000000000000)
 * |                              |
 * |                              |
 * |      512TB/16TB of vmemmap   |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |      512TB/16TB of IO map    |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      512TB/16TB of vmap      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		H_KERN_MAP_SIZE
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START		H_VMALLOC_END
#define H_KERN_IO_SIZE		H_KERN_MAP_SIZE
#define H_KERN_IO_END		(H_KERN_IO_START + H_KERN_IO_SIZE)

#define H_VMEMMAP_START		H_KERN_IO_END
#define H_VMEMMAP_SIZE		H_KERN_MAP_SIZE
#define H_VMEMMAP_END		(H_VMEMMAP_START + H_VMEMMAP_SIZE)
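
/*
 * Worked layout (a sketch assuming the 64K-page values from the diagram
 * above: H_KERN_VIRT_START = 0xc008000000000000 and H_KERN_MAP_SIZE =
 * 512TB = 0x0002000000000000):
 *   vmalloc: 0xc008000000000000 - 0xc00a000000000000
 *   IO map:  0xc00a000000000000 - 0xc00c000000000000
 *   vmemmap: 0xc00c000000000000 - 0xc00e000000000000 (the map end above)
 * Each range starts where the previous one ends, so the three maps tile
 * the kernel virtual area contiguously.
 */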

#define NON_LINEAR_REGION_ID(ea)	((((unsigned long)ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2)

/*
 * Region IDs
 */
#define USER_REGION_ID		0
#define LINEAR_MAP_REGION_ID	1
#define VMALLOC_REGION_ID	NON_LINEAR_REGION_ID(H_VMALLOC_START)
#define IO_REGION_ID		NON_LINEAR_REGION_ID(H_KERN_IO_START)
#define VMEMMAP_REGION_ID	NON_LINEAR_REGION_ID(H_VMEMMAP_START)
#define INVALID_REGION_ID	(VMEMMAP_REGION_ID + 1)
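
/*
 * Worked example (a sketch assuming H_KERN_MAP_SIZE == 1UL << REGION_SHIFT,
 * i.e. each map occupies exactly one region):
 *   VMALLOC_REGION_ID = ((0 * H_KERN_MAP_SIZE) >> REGION_SHIFT) + 2 = 2
 *   IO_REGION_ID      = ((1 * H_KERN_MAP_SIZE) >> REGION_SHIFT) + 2 = 3
 *   VMEMMAP_REGION_ID = ((2 * H_KERN_MAP_SIZE) >> REGION_SHIFT) + 2 = 4
 * The BUILD_BUG_ON() in get_region_id() below checks the first of these.
 */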

/*
 * The vmemmap area (H_VMEMMAP_START above) lives in its own region on
 * hash MMU CPUs.
 */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
static inline int get_region_id(unsigned long ea)
{
	int region_id;
	int id = (ea >> 60UL);

	if (id == 0)
		return USER_REGION_ID;

	if (id != (PAGE_OFFSET >> 60))
		return INVALID_REGION_ID;

	if (ea < H_KERN_VIRT_START)
		return LINEAR_MAP_REGION_ID;

	BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);

	region_id = NON_LINEAR_REGION_ID(ea);
	return region_id;
}
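
/*
 * Usage sketch (hash__ea_is_vmalloc() is a hypothetical helper, not part
 * of this header): callers classify an effective address first and then
 * pick the matching translation path.
 */
static inline int hash__ea_is_vmalloc(unsigned long ea)
{
	return get_region_id(ea) == VMALLOC_REGION_ID;
}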

#define hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__p4d_bad(p4d_t p4d)
{
	return (p4d_val(p4d) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	/*
	 * Spin while H_PAGE_BUSY is set, then atomically clear the 'clr'
	 * bits and set the 'set' bits in one ldarx/stdcx. sequence.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
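
/*
 * Usage sketch (hash__example_wrprotect() is hypothetical, for
 * illustration only): clearing a permission bit goes through
 * hash__pte_update() so that a PTE with a live hash entry
 * (H_PAGE_HASHPTE set) also gets hpte_need_flush() called on it.
 */
static inline void hash__example_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	/* clr = _PAGE_WRITE, set = 0, huge = 0; returns the old PTE value */
	hash__pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}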

/*
 * Set the dirty and/or accessed bits atomically in a Linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	/* Spin while H_PAGE_BUSY is set, then OR in the new flag bits. */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}
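
/*
 * Illustration (hash__example_bookkeeping_ignored() is hypothetical, and
 * relies on H_PAGE_HASHPTE being part of _PAGE_HPTEFLAGS): two PTEs that
 * differ only in hash bookkeeping bits still compare equal, so a hash
 * fault that sets H_PAGE_HASHPTE behind our back does not make
 * hash__pte_same() report a changed PTE.
 */
static inline int hash__example_bookkeeping_ignored(pte_t pte)
{
	pte_t hashed = __pte(pte_val(pte) | H_PAGE_HASHPTE);

	return hash__pte_same(pte, hashed);	/* always 1 */
}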

unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low level function performs the actual PTE insertion. Setting the
 * PTE depends on the MMU type and other factors; it's a horrible mess
 * that I'm not going to try to clean up now, but I'm keeping it in one
 * place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * On hash we just store the PTE normally; that covers all the
	 * 64-bit cases here. The hash page table entry is only created
	 * later, at hash fault time.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end,
				 int nid, pgprot_t prot);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

void hash__kernel_map_pages(struct page *page, int numpages, int enable);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */