/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either a 4K or 64K software
 * page size. When using 64K pages, however, whether we really support
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#define PAGE_SHIFT      CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE       (ASM_CONST(1) << PAGE_SHIFT)
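/*
 * Editorial worked example, not part of the original header: assuming the
 * common 4K configuration (CONFIG_PPC_PAGE_SHIFT == 12), the definitions
 * above evaluate to
 *
 *     PAGE_SHIFT = 12
 *     PAGE_SIZE  = ASM_CONST(1) << 12 = 0x1000 (4096 bytes)
 *
 * while a 64K build (CONFIG_PPC_PAGE_SHIFT == 16) gives
 * PAGE_SIZE = 0x10000 (65536 bytes).
 */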

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT     19  /* 512k pages */
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define HPAGE_SHIFT     22  /* 4M pages */
#endif
#define HPAGE_SIZE      ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK      (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE     (MMU_PAGE_COUNT-1)
#endif
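/*
 * Editorial worked example, not part of the original header: assuming an
 * 8xx build with CONFIG_HUGETLB_PAGE and 4K base pages (PAGE_SHIFT == 12),
 * the derived values above become
 *
 *     HPAGE_SHIFT        = 19
 *     HPAGE_SIZE         = 1UL << 19 = 0x80000 (512K)
 *     HPAGE_MASK         = ~(0x80000 - 1) = 0xfff80000
 *     HUGETLB_PAGE_ORDER = 19 - 12 = 7
 */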

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
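/*
 * Editorial worked example, not part of the original header: with an
 * assumed PAGE_SHIFT of 12, (1 << 12) - 1 is the int 0xfff, so PAGE_MASK
 * is the int 0xfffff000 (value -4096). Because it is a signed int, storing
 * it in a wider type sign-extends exactly as the comment above intends:
 *
 *     unsigned int       m32 = PAGE_MASK;   ->  0xfffff000
 *     unsigned long long m64 = PAGE_MASK;   ->  0xfffffffffffff000
 */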

/*
 * KERNELBASE is the virtual address of the start of the kernel; it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to determine a virtual address from a physical one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET     ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET     ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
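/*
 * Editorial worked example, not part of the original header: illustrative
 * ppc32 values (not taken from any real config) for a kernel loaded at
 * 32MB, kdump-style, where KERNELBASE != PAGE_OFFSET:
 *
 *     PAGE_OFFSET    = 0xc0000000
 *     KERNELBASE     = 0xc2000000
 *     PHYSICAL_START = 0x02000000
 *     MEMORY_START   = PHYSICAL_START + PAGE_OFFSET - KERNELBASE = 0
 *
 * The linear mapping equation from the comment above holds:
 *     KERNELBASE - PAGE_OFFSET = 0x02000000 = PHYSICAL_START - MEMORY_START
 *
 * and both translation forms agree, e.g. for pa = 0x02100000:
 *     va = pa + PAGE_OFFSET - MEMORY_START   = 0xc2100000
 *     va = pa + KERNELBASE  - PHYSICAL_START = 0xc2100000
 */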

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START  kernstart_addr

#else   /* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START  ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See Description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START    0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START    memstart_addr
#else
#define MEMORY_START    (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET     ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
    unsigned long min_pfn = ARCH_PFN_OFFSET;

    return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif

#define virt_to_pfn(kaddr)  (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)   __va((pfn) << PAGE_SHIFT)
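/*
 * Editorial worked example, not part of the original header: reusing the
 * illustrative values from the example above (PAGE_OFFSET = 0xc0000000,
 * MEMORY_START = 0, PAGE_SHIFT = 12) with the 32-bit non-Book-E __va/__pa
 * defined later in this file, the macros round-trip as expected:
 *
 *     virt_to_pfn(0xc2100000) = __pa(0xc2100000) >> 12
 *                             = 0x02100000 >> 12 = 0x2100
 *     pfn_to_kaddr(0x2100)    = __va(0x2100 << 12) = (void *)0xc2100000
 */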

#define virt_addr_valid(vaddr)  ({                  \
    unsigned long _addr = (unsigned long)vaddr;         \
    _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&   \
    pfn_valid(virt_to_pfn(_addr));                  \
})

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 *   With RELOCATABLE && PPC32, we support loading the kernel at any physical
 *   address without any restriction on the page alignment.
 *
 *   We find the runtime address of _stext and relocate ourselves based on
 *   the following calculation:
 *
 *       virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *                      MODULO(_stext.run, 256M)
 *
 *   and create the following mapping:
 *
 *       ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 *   When we process relocations, we cannot depend on the
 *   existing equation for the __va()/__pa() translations:
 *
 *       __va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 *   Where:
 *       PHYSICAL_START = kernstart_addr = Physical address of _stext
 *       KERNELBASE = Compiled virtual address of _stext.
 *
 *   This formula holds true iff the kernel load address is TLB page aligned.
 *
 *   In our case, we need to also account for the shift in the kernel virtual
 *   address.
 *
 *   E.g.,
 *
 *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
 *   In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
 *
 *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *                 = 0xbc100000, which is wrong.
 *
 *   Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000,
 *   according to our mapping.
 *
 *   Hence we use the following formula to get the translations right:
 *
 *       __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 *   Where:
 *       PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *       Effective KERNELBASE = virtual_base
 *                            = ALIGN_DOWN(KERNELBASE, 256M) +
 *                              MODULO(PHYSICAL_START, 256M)
 *
 *  To make __va() / __pa() more lightweight, we introduce
 *  a new variable virt_phys_offset, which holds:
 *
 *      virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *                       = ALIGN_DOWN(KERNELBASE, 256M) -
 *                         ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 *  Hence:
 *
 *      __va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *              = x + virt_phys_offset
 *
 *      and
 *
 *      __pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *              = x - virt_phys_offset
 *
 * On non-Book-E PPC64, PAGE_OFFSET and MEMORY_START are constants, so use
 * the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
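/*
 * Editorial worked example, not part of the original header: plugging the
 * scenario from the comment above (kernel loaded at 64MB, KERNELBASE =
 * PAGE_OFFSET = 0xc0000000) into these definitions:
 *
 *     Effective KERNELBASE = ALIGN_DOWN(0xc0000000, 256M) + MODULO(64M, 256M)
 *                          = 0xc0000000 + 0x04000000 = 0xc4000000
 *     virt_phys_offset     = 0xc4000000 - 0x04000000 = 0xc0000000
 *
 *     __va(0x100000)   = 0x100000   + 0xc0000000 = 0xc0100000
 *     __pa(0xc0100000) = 0xc0100000 - 0xc0000000 = 0x100000
 *
 * which matches the mapping the comment expects.
 */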
#else
#ifdef CONFIG_PPC64

#define VIRTUAL_WARN_ON(x)  WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))

/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)                             \
({                                  \
    VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET);     \
    (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);    \
})

#define __pa(x)                             \
({                                  \
    VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET);      \
    (unsigned long)(x) & 0x0fffffffffffffffUL;          \
})
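/*
 * Editorial note, not part of the original header: the OR/AND forms above
 * behave like + and - because the 64-bit linear map starts at a high
 * constant (PAGE_OFFSET is typically 0xc000000000000000) whose set bits
 * never overlap the physical-address bits kept by the 0x0fff... mask.
 * E.g. for an assumed physical address 0x2100000:
 *
 *     __va(0x2100000)          = 0x2100000 | PAGE_OFFSET = 0xc000000002100000
 *     __pa(0xc000000002100000) = 0xc000000002100000 & 0x0fffffffffffffffUL
 *                              = 0x2100000
 *
 * the same results that adding/subtracting PAGE_OFFSET would give.
 */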

#else /* 32-bit, non-Book-E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 VM_DATA_FLAGS_TSK_EXEC
#define VM_DATA_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)   ((x) >= 0x8000000000000000ul)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x)   ((x) >= PAGE_OFFSET)
#else
#define is_kernel_addr(x)   ((x) >= TASK_SIZE)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else   /* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK    (0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK     0xfff
#else
#define HUGEPD_SHIFT_MASK     0x3f
#endif

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif


#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)     (0)
#define pgd_huge(pgd)       (0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
        struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

extern unsigned long kernstart_virt_addr;

static inline unsigned long kaslr_offset(void)
{
    return kernstart_virt_addr - KERNELBASE;
}
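/*
 * Editorial usage note, not part of the original header: kaslr_offset()
 * reports how far KASLR moved the kernel's virtual start. For instance, if
 * a randomized boot placed the kernel at kernstart_virt_addr = KERNELBASE +
 * 0xa00000, kaslr_offset() returns 0xa00000, and subtracting it from a
 * randomized kernel text address recovers the link-time address.
 */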

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */