0001
0002 #ifndef _ASM_X86_PGTABLE_2LEVEL_H
0003 #define _ASM_X86_PGTABLE_2LEVEL_H
0004
/*
 * Report a malformed page-table entry with its file/line location.
 * With 2-level paging a pte is a single 32-bit word, so printing
 * pte_low shows the entire entry.
 */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
/* Same for a bad pgd; pgd_val() extracts the raw entry value. */
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))
0009
0010
0011
0012
0013
0014
/*
 * Install a pte.  A 2-level pte is one machine word, so this is a
 * single plain store.
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}
0019
/* Install a pmd entry; a single word store, like native_set_pte(). */
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
0024
/*
 * Intentionally empty: with 2-level paging there is no separate pud
 * level to write.  NOTE(review): presumed rationale (pud folded into
 * pgd) -- confirm against the page-table folding headers.
 */
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
}
0028
0029 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
0030 {
0031 native_set_pte(ptep, pte);
0032 }
0033
0034 static inline void native_pmd_clear(pmd_t *pmdp)
0035 {
0036 native_set_pmd(pmdp, __pmd(0));
0037 }
0038
/*
 * Intentionally empty: no pud level exists on 2-level paging, so there
 * is nothing to clear.  NOTE(review): presumed rationale -- confirm
 * against the page-table folding headers.
 */
static inline void native_pud_clear(pud_t *pudp)
{
}
0042
0043 static inline void native_pte_clear(struct mm_struct *mm,
0044 unsigned long addr, pte_t *xp)
0045 {
0046 *xp = native_make_pte(0);
0047 }
0048
#ifdef CONFIG_SMP
/*
 * Atomically read and clear a pte.  xchg() swaps in zero in one
 * operation so no concurrent update of the entry can be lost between
 * the read and the clear.  NOTE(review): on x86 this presumably guards
 * against hardware accessed/dirty-bit updates from other CPUs --
 * confirm.
 */
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
	return __pte(xchg(&xp->pte_low, 0));
}
#else
/* UP build: no cross-CPU races, the non-atomic local variant suffices. */
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
0057
#ifdef CONFIG_SMP
/*
 * Atomically read and clear a pmd.  The cast to pmdval_t * exposes the
 * raw scalar value inside the pmd_t wrapper so xchg() can operate on it.
 */
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
	return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
/* UP build: no cross-CPU races, the non-atomic local variant suffices. */
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
0066
#ifdef CONFIG_SMP
/*
 * Atomically read and clear a pud, mirroring the pte/pmd variants.
 * The cast exposes the raw pudval_t for xchg().
 */
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
	return __pud(xchg((pudval_t *)xp, 0));
}
#else
/* UP build: no cross-CPU races, the non-atomic local variant suffices. */
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif
0075
0076
/*
 * Extract a bit-field from @value and relocate it: shift right by
 * @rightshift, keep only the bits in @mask, then shift the result left
 * by @leftshift.  Used to move fields between pte encodings.
 */
static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
				      unsigned long mask, unsigned int leftshift)
{
	unsigned long field = (value >> rightshift) & mask;

	return field << leftshift;
}
0082
0083
/*
 * Encoding of a swap entry in a non-present pte (2-level layout):
 *   - _PAGE_PRESENT must be clear;
 *   - the swap type occupies SWP_TYPE_BITS bits just above the
 *     present bit;
 *   - the swap offset occupies the bits from SWP_OFFSET_SHIFT upward.
 * NOTE(review): exact bit positions follow _PAGE_BIT_PRESENT and
 * _PAGE_BIT_PROTNONE -- confirm against pgtable_types.h.
 */
#define SWP_TYPE_BITS 5
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)

/* Fail the build if swap types could overflow the bits reserved above. */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
& ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
((type) << (_PAGE_BIT_PRESENT + 1)) \
| ((offset) << SWP_OFFSET_SHIFT) })
/* A swap entry and a non-present pte share the same raw representation. */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
0097
0098
0099
/*
 * No-op stub: always returns a zero mask, so callers never invert pte
 * bits on this configuration.  NOTE(review): presumably because the
 * L1TF PFN-inversion mitigation is not applied to 2-level page tables
 * -- confirm against the PAE/64-bit counterparts.
 */
static inline u64 protnone_mask(u64 val)
{
	return 0;
}
0104
/*
 * No-op stub: returns @val unchanged (no PROT_NONE bit flipping on
 * this configuration; @oldval and @mask are ignored).
 */
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
{
	return val;
}
0109
/*
 * No-op stub: ptes never need PFN inversion on this configuration,
 * regardless of @val.
 */
static inline bool __pte_needs_invert(u64 val)
{
	return false;
}
0114
0115 #endif