/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLY__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

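/*
 * page_offset_base, vmalloc_base and vmemmap_base locate the direct
 * mapping of all physical memory, the vmalloc/ioremap space and the
 * virtual memory map (the struct page array).  With CONFIG_RANDOMIZE_MEMORY
 * they are randomized at boot; otherwise they keep the compile-time
 * defaults from page_64_types.h.
 */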
extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;

static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
    unsigned long y = x - __START_KERNEL_map;

    /* use the carry flag to determine if x was < __START_KERNEL_map */
    x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

    return x;
}
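
/*
 * For a kernel-image address (x >= __START_KERNEL_map) the subtraction
 * above does not borrow, so x > y and the result is
 * x - __START_KERNEL_map + phys_base.  For a direct-map address
 * (PAGE_OFFSET <= x < __START_KERNEL_map) the subtraction wraps, the
 * comparison fails, and the result reduces to x - PAGE_OFFSET.
 */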

#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)      __phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
    ((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif
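
/*
 * __phys_addr() backs __pa() and handles both direct-map and kernel-image
 * addresses; __phys_addr_symbol() backs __pa_symbol() and only accepts
 * kernel-image addresses.  With CONFIG_DEBUG_VIRTUAL the out-of-line
 * versions add sanity checks on the address passed in.
 */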

#define __phys_reloc_hide(x)    (x)

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)          ((pfn) < max_pfn)
#endif

void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);

static inline void clear_page(void *page)
{
    alternative_call_2(clear_page_orig,
               clear_page_rep, X86_FEATURE_REP_GOOD,
               clear_page_erms, X86_FEATURE_ERMS,
               "=D" (page),
               "0" (page)
               : "cc", "memory", "rax", "rcx");
}
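
/*
 * alternative_call_2() patches the call target at boot: the unrolled
 * clear_page_orig() is the default, clear_page_rep() (REP STOSQ) is used
 * when the CPU reports X86_FEATURE_REP_GOOD, and clear_page_erms()
 * (REP STOSB) when it reports X86_FEATURE_ERMS.
 */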

void copy_page(void *to, void *from);

#ifdef CONFIG_X86_5LEVEL
/*
 * User space process size.  This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen.  This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
static __always_inline unsigned long task_size_max(void)
{
    unsigned long ret;

    alternative_io("movq %[small],%0","movq %[large],%0",
            X86_FEATURE_LA57,
            "=r" (ret),
            [small] "i" ((1ul << 47)-PAGE_SIZE),
            [large] "i" ((1ul << 56)-PAGE_SIZE));

    return ret;
}
#endif  /* CONFIG_X86_5LEVEL */
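
/*
 * With 4-level page tables (X86_FEATURE_LA57 clear) task_size_max() is
 * (1UL << 47) - PAGE_SIZE = 0x00007ffffffff000; with 5-level paging it is
 * (1UL << 56) - PAGE_SIZE = 0x00fffffffffff000 (assuming 4 KiB pages).
 */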

#endif  /* !__ASSEMBLY__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif /* _ASM_X86_PAGE_64_H */