0001
0002
0003
0004
0005
0006
0007
0008
0009 #ifndef _ASM_PAGE_H
0010 #define _ASM_PAGE_H
0011
0012 #include <spaces.h>
0013 #include <linux/const.h>
0014 #include <linux/kernel.h>
0015 #include <asm/mipsregs.h>
0016
0017
0018
0019
0020 #ifdef CONFIG_PAGE_SIZE_4KB
0021 #define PAGE_SHIFT 12
0022 #endif
0023 #ifdef CONFIG_PAGE_SIZE_8KB
0024 #define PAGE_SHIFT 13
0025 #endif
0026 #ifdef CONFIG_PAGE_SIZE_16KB
0027 #define PAGE_SHIFT 14
0028 #endif
0029 #ifdef CONFIG_PAGE_SIZE_32KB
0030 #define PAGE_SHIFT 15
0031 #endif
0032 #ifdef CONFIG_PAGE_SIZE_64KB
0033 #define PAGE_SHIFT 16
0034 #endif
/*
 * PAGE_SIZE is the runtime page size selected above; PAGE_MASK clears
 * the in-page offset bits of an address.
 *
 * Define PAGE_MASK in terms of PAGE_SIZE so the mask has the same
 * unsigned long type as PAGE_SIZE.  The previous
 * "(~((1 << PAGE_SHIFT) - 1))" form produced a plain (signed) int,
 * which only worked through sign-extension when combined with wider
 * types and triggers sparse/type-checking warnings; this matches the
 * definition used by the other architectures.
 */
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
0037
0038
0039
0040
0041
0042 static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
0043 {
0044 switch (mmuextdef) {
0045 case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
0046 if (PAGE_SIZE == (1 << 30))
0047 return 5;
0048 if (PAGE_SIZE == (1llu << 32))
0049 return 6;
0050 if (PAGE_SIZE > (256 << 10))
0051 return 7;
0052 fallthrough;
0053 case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
0054 return (PAGE_SHIFT - 10) / 2;
0055 default:
0056 panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
0057 mmuextdef >> 14);
0058 }
0059 }
0060
/*
 * Huge page geometry: HPAGE_SHIFT = PAGE_SHIFT + (PAGE_SHIFT - 3),
 * i.e. one huge page covers as many base pages as page-table entries
 * fit in one page (the "- 3" suggests 8-byte entries — confirm against
 * the pgtable definitions).  When huge TLB support is not configured,
 * any use of these macros is turned into a build error via BUILD_BUG().
 */
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#else
#define HPAGE_SHIFT	({BUILD_BUG(); 0; })
#define HPAGE_SIZE	({BUILD_BUG(); 0; })
#define HPAGE_MASK	({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
#endif
0072
0073 #include <linux/pfn.h>
0074
0075 extern void build_clear_page(void);
0076 extern void build_copy_page(void);
0077
0078
0079
0080
0081
0082
0083 #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
0084 extern unsigned long ARCH_PFN_OFFSET;
0085 # define ARCH_PFN_OFFSET ARCH_PFN_OFFSET
0086 #else
0087 # define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
0088 #endif
0089
0090 extern void clear_page(void * page);
0091 extern void copy_page(void * to, void * from);
0092
0093 extern unsigned long shm_align_mask;
0094
0095 static inline unsigned long pages_do_alias(unsigned long addr1,
0096 unsigned long addr2)
0097 {
0098 return (addr1 ^ addr2) & shm_align_mask;
0099 }
0100
0101 struct page;
0102
0103 static inline void clear_user_page(void *addr, unsigned long vaddr,
0104 struct page *page)
0105 {
0106 extern void (*flush_data_cache_page)(unsigned long addr);
0107
0108 clear_page(addr);
0109 if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
0110 flush_data_cache_page((unsigned long)addr);
0111 }
0112
0113 struct vm_area_struct;
0114 extern void copy_user_highpage(struct page *to, struct page *from,
0115 unsigned long vaddr, struct vm_area_struct *vma);
0116
0117 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
0118
0119
0120
0121
/*
 * Page-table value wrappers.  Each value lives in a one-member struct
 * so it benefits from C type checking and cannot be mixed up with a
 * plain integer; pte_val()/__pte() unwrap and wrap the raw value.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#ifdef CONFIG_CPU_MIPS32
/*
 * 64-bit physical addresses on a 32-bit CPU: the PTE is stored as two
 * 32-bit halves; pte_val() joins them and __pte() splits a 64-bit
 * value back into low/high words.
 */
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#else
/* 64-bit physical addresses with 64-bit arithmetic: one wide word. */
typedef struct { unsigned long long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
#endif
#else
/* Physical addresses fit in unsigned long. */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147 typedef struct { unsigned long pgd; } pgd_t;
0148 #define pgd_val(x) ((x).pgd)
0149 #define __pgd(x) ((pgd_t) { (x) } )
0150
0151
0152
0153
0154 typedef struct { unsigned long pgprot; } pgprot_t;
0155 #define pgprot_val(x) ((x).pgprot)
0156 #define __pgprot(x) ((pgprot_t) { (x) } )
0157 #define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK)
0158
0159
0160
0161
0162
0163
0164
0165
0166 #define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
0167
0168
0169
0170
/*
 * ___pa(): convert a kernel virtual address to the corresponding
 * physical address.  Callers use the __pa() wrapper below.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in xkphys.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, ie.
		 * the address x is going to be in kseg0 or kseg1. We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR.
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use so the memory map could be anything, making it not
	 * safe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
0198 #define __pa(x) ___pa((unsigned long)(x))
0199 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
0200 #include <asm/io.h>
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213 #define __pa_symbol_nodebug(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
0214
0215 #ifdef CONFIG_DEBUG_VIRTUAL
0216 extern phys_addr_t __phys_addr_symbol(unsigned long x);
0217 #else
0218 #define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
0219 #endif
0220
0221 #ifndef __pa_symbol
0222 #define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x))
0223 #endif
0224
0225 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
0226
0227 #ifdef CONFIG_FLATMEM
0228
0229 static inline int pfn_valid(unsigned long pfn)
0230 {
0231
0232 extern unsigned long max_mapnr;
0233 unsigned long pfn_offset = ARCH_PFN_OFFSET;
0234
0235 return pfn >= pfn_offset && pfn < max_mapnr;
0236 }
0237
0238 #elif defined(CONFIG_SPARSEMEM)
0239
0240
0241
0242 #elif defined(CONFIG_NUMA)
0243
/*
 * NUMA pfn_valid(): the pfn is valid when it maps to a known node and
 * lies below that node's last spanned pfn.  NOTE(review): only the
 * upper bound is checked against the node here — presumably
 * pfn_to_nid() returns a negative value for pfns outside any node;
 * verify against its implementation.
 */
#define pfn_valid(pfn)							\
({									\
	unsigned long __pfn = (pfn);					\
	int __n = pfn_to_nid(__pfn);					\
	((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn +		\
			NODE_DATA(__n)->node_spanned_pages)		\
		: 0);							\
})
0252
0253 #endif
0254
0255 #define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
0256 #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
0257
0258 extern bool __virt_addr_valid(const volatile void *kaddr);
0259 #define virt_addr_valid(kaddr) \
0260 __virt_addr_valid((const volatile void *) (kaddr))
0261
0262 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
0263
0264 extern unsigned long __kaslr_offset;
0265 static inline unsigned long kaslr_offset(void)
0266 {
0267 return __kaslr_offset;
0268 }
0269
0270 #include <asm-generic/memory_model.h>
0271 #include <asm-generic/getorder.h>
0272
0273 #endif