Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * This file is subject to the terms and conditions of the GNU General Public
0003  * License.  See the file "COPYING" in the main directory of this archive
0004  * for more details.
0005  *
0006  * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
0007  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
0008  */
0009 #ifndef _ASM_PAGE_H
0010 #define _ASM_PAGE_H
0011 
0012 #include <spaces.h>
0013 #include <linux/const.h>
0014 #include <linux/kernel.h>
0015 #include <asm/mipsregs.h>
0016 
/*
 * PAGE_SHIFT determines the page size.  Exactly one CONFIG_PAGE_SIZE_*
 * option is selected by Kconfig, so exactly one definition survives.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT  12
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PAGE_SHIFT  13
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT  14
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PAGE_SHIFT  15
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT  16
#endif
#define PAGE_SIZE   (_AC(1,UL) << PAGE_SHIFT)
/*
 * Derive PAGE_MASK from PAGE_SIZE so the mask carries the same
 * unsigned long type (via _AC) in both C and assembly contexts.
 * The previous form, ~((1 << PAGE_SHIFT) - 1), was a signed int and
 * only yielded the correct 64-bit mask through sign extension.
 */
#define PAGE_MASK   (~(PAGE_SIZE - 1))
0037 
/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 *
 * Returns the FTLB page-size field encoding for the current base
 * PAGE_SIZE, according to which Config4 MMUExtDef scheme the CPU
 * reports (passed in as @mmuextdef).
 */
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
    switch (mmuextdef) {
    case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
        /* Sizes beyond 256KB get dedicated (or reserved) encodings. */
        if (PAGE_SIZE == (1 << 30))
            return 5;
        if (PAGE_SIZE == (1llu << 32))
            return 6;
        if (PAGE_SIZE > (256 << 10))
            return 7; /* reserved */
        /* <= 256KB shares the VTLBSIZEEXT encoding below. */
        fallthrough;
    case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
        /* 1KB -> 0, 4KB -> 1, 16KB -> 2, ...: one step per factor of 4 */
        return (PAGE_SHIFT - 10) / 2;
    default:
        panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
              mmuextdef >> 14);
    }
}
0060 
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* A huge page spans one full page-table page worth of base pages. */
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE  (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
/* Trip a build error if huge-page constants are referenced without support. */
#define HPAGE_SHIFT ({BUILD_BUG(); 0; })
#define HPAGE_SIZE  ({BUILD_BUG(); 0; })
#define HPAGE_MASK  ({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER  ({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
0072 
0073 #include <linux/pfn.h>
0074 
0075 extern void build_clear_page(void);
0076 extern void build_copy_page(void);
0077 
/*
 * It's normally defined only for FLATMEM config but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
/* Offset is determined at runtime; the macro aliases the variable. */
extern unsigned long ARCH_PFN_OFFSET;
# define ARCH_PFN_OFFSET    ARCH_PFN_OFFSET
#else
/* Fixed offset: first pfn at or above the platform's PHYS_OFFSET. */
# define ARCH_PFN_OFFSET    PFN_UP(PHYS_OFFSET)
#endif
0089 
0090 extern void clear_page(void * page);
0091 extern void copy_page(void * to, void * from);
0092 
extern unsigned long shm_align_mask;

/*
 * Two page addresses alias in a virtually indexed cache when they
 * differ within the bits selected by shm_align_mask.  Non-zero result
 * means the pair may alias.
 */
static inline unsigned long pages_do_alias(unsigned long addr1,
    unsigned long addr2)
{
    unsigned long diff = addr1 ^ addr2;

    return diff & shm_align_mask;
}
0100 
struct page;

/*
 * Clear a page that will be mapped into user space at @vaddr.  After
 * zeroing through the kernel mapping @addr, flush that mapping from
 * the data cache if it may alias the user mapping (aliasing is decided
 * by pages_do_alias() on the page-aligned addresses).
 */
static inline void clear_user_page(void *addr, unsigned long vaddr,
    struct page *page)
{
    /* Declared locally to avoid pulling in the cache headers here;
     * the function pointer is installed by cache setup code elsewhere. */
    extern void (*flush_data_cache_page)(unsigned long addr);

    clear_page(addr);
    if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
        flush_data_cache_page((unsigned long)addr);
}
0112 
0113 struct vm_area_struct;
0114 extern void copy_user_highpage(struct page *to, struct page *from,
0115     unsigned long vaddr, struct vm_area_struct *vma);
0116 
0117 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
0118 
/*
 * These are used to make use of C type-checking..
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
  #ifdef CONFIG_CPU_MIPS32
    /* 64-bit PTE on a 32-bit CPU: value is split across two 32-bit words. */
    typedef struct { unsigned long pte_low, pte_high; } pte_t;
    #define pte_val(x)    ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
    #define __pte(x)      ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
  #else
     /* 64-bit PTE held in a single 64-bit word. */
     typedef struct { unsigned long long pte; } pte_t;
     #define pte_val(x) ((x).pte)
     #define __pte(x)   ((pte_t) { (x) } )
  #endif
#else
/* Native-word-sized PTE. */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)  ((x).pte)
#define __pte(x)    ((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;
0138 
/*
 * Right now we don't support 4-level pagetables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally the top of the hierarchy, the pgd
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)  ((x).pgd)
#define __pgd(x)    ((pgd_t) { (x) } )

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
/* Protection bits of a pte are everything outside the pfn field. */
#define pte_pgprot(x)   __pgprot(pte_val(x) & ~_PFN_MASK)
0158 
/*
 * On R4000-style MMUs where a TLB entry is mapping an adjacent even / odd
 * pair of pages we only have a single global bit per pair of pages.  When
 * writing to the TLB make sure we always have the bit set for both pages
 * or none.  This macro is used to access the `buddy' of the pte we're just
 * working on.  (XOR with sizeof(pte_t) flips between the even and odd
 * entry of the pair.)
 */
#define ptep_buddy(x)   ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
0167 
/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
    if (IS_ENABLED(CONFIG_64BIT)) {
        /*
         * For MIPS64 the virtual address may either be in one of
         * the compatibility segements ckseg0 or ckseg1, or it may
         * be in xkphys.
         */
        return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
    }

    if (!IS_ENABLED(CONFIG_EVA)) {
        /*
         * We're using the standard MIPS32 legacy memory map, ie.
         * the address x is going to be in kseg0 or kseg1. We can
         * handle either case by masking out the desired bits using
         * CPHYSADDR.
         */
        return CPHYSADDR(x);
    }

    /*
     * EVA is in use so the memory map could be anything, making it not
     * safe to just mask out bits.
     */
    return x - PAGE_OFFSET + PHYS_OFFSET;
}
/* virtual -> physical; accepts any pointer or integer type. */
#define __pa(x)     ___pa((unsigned long)(x))
/* physical -> kernel virtual; only valid for directly mapped memory. */
#define __va(x)     ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
#include <asm/io.h>
0200 #include <asm/io.h>
0201 
/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
 * discussion can be found in
 * https://lore.kernel.org/lkml/a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com
 *
 * It is unclear if the miscompilations mentioned in
 * https://lore.kernel.org/lkml/1281303490-390-1-git-send-email-namhyung@gmail.com
 * also affect MIPS so we keep this one until GCC 3.x has been retired
 * before we can apply https://patchwork.linux-mips.org/patch/1541/
 */
#define __pa_symbol_nodebug(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))

#ifdef CONFIG_DEBUG_VIRTUAL
/* Checked variant: validates the address before translating it. */
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __phys_addr_symbol(x)   __pa_symbol_nodebug(x)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)      __phys_addr_symbol((unsigned long)(x))
#endif

/* pfn -> kernel virtual address of the page's direct mapping. */
#define pfn_to_kaddr(pfn)   __va((pfn) << PAGE_SHIFT)
0226 
0227 #ifdef CONFIG_FLATMEM
0228 
0229 static inline int pfn_valid(unsigned long pfn)
0230 {
0231     /* avoid <linux/mm.h> include hell */
0232     extern unsigned long max_mapnr;
0233     unsigned long pfn_offset = ARCH_PFN_OFFSET;
0234 
0235     return pfn >= pfn_offset && pfn < max_mapnr;
0236 }
0237 
#elif defined(CONFIG_SPARSEMEM)

/* pfn_valid is defined in linux/mmzone.h */

#elif defined(CONFIG_NUMA)

/*
 * NUMA: valid when the pfn maps to a node (pfn_to_nid() >= 0) and lies
 * below the end of that node's spanned range.
 *
 * NOTE(review): there is no lower-bound check against node_start_pfn
 * here; presumably pfn_to_nid() already rejects such pfns — confirm
 * against the platform's pfn_to_nid implementation.
 */
#define pfn_valid(pfn)                          \
({                                  \
    unsigned long __pfn = (pfn);                    \
    int __n = pfn_to_nid(__pfn);                    \
    ((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn +     \
                   NODE_DATA(__n)->node_spanned_pages)  \
            : 0);                       \
})

#endif
0254 
/* kernel virtual address -> pfn / struct page of its backing frame. */
#define virt_to_pfn(kaddr)      PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))

/* True if @kaddr is a valid, directly mapped kernel virtual address. */
extern bool __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr)                      \
    __virt_addr_valid((const volatile void *) (kaddr))

#define VM_DATA_DEFAULT_FLAGS   VM_DATA_FLAGS_TSK_EXEC
0263 
/*
 * Report the KASLR displacement of the running kernel.  The value is
 * read from __kaslr_offset, presumably set by the early relocation
 * code — confirm against arch/mips relocate handling.
 */
extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
    return __kaslr_offset;
}
0269 
0270 #include <asm-generic/memory_model.h>
0271 #include <asm-generic/getorder.h>
0272 
0273 #endif /* _ASM_PAGE_H */