/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

#include <asm/atomic64_32.h>

/*
 * Intel Physical Address Extension (PAE) mode: three-level page tables
 * on 32-bit x86, with 64-bit page table entries.
 */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

/*
 * Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a plain "*pmdp" dereference generated by the compiler.  In places where
 * pte_offset_map_lock() is called with only the mmap_lock held for
 * reading (for example mincore racing against page faults or
 * MADV_DONTNEED), the page fault side updates the pmd with set_64bit(),
 * but a plain dereference on the reader side is not guaranteed to read
 * the 64-bit value atomically, so the reader can observe a torn pmd.
 *
 * All such lockless readers must therefore go through pmd_read_atomic()
 * to decide whether the pmd is none, points to a pte page, or (with THP)
 * is a huge/unstable entry, before calling pte_offset_map_lock() or
 * pmd_trans_huge().
 *
 * Without THP, a pmd can only go from none to populated while the
 * mmap_lock is held for reading, so an atomic value can always be
 * returned.  With THP the pmd can change under us at any time, so the
 * high half is only ORed in once the low half has been seen to be
 * non-zero; a zero low half is reported as a 'none' pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
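
/*
 * Usage sketch (illustrative only, not a call site in this header): a
 * reader that holds mmap_lock only for read would follow the pattern
 *
 *	pmd_t pmdval = pmd_read_atomic(pmdp);
 *	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
 *		bail out (nothing stable to walk);
 *	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 *
 * instead of dereferencing *pmdp directly, so it can never act on a torn
 * half-written pmd.  Real callers differ in how they handle the THP case.
 */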

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}
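
/*
 * With 3-level (PAE) paging the pud level is folded into the pgd, so a
 * pud entry is really one of the four top-level PDPT entries.  Under
 * CONFIG_PAGE_TABLE_ISOLATION that top level also exists in a user-space
 * copy, which is why the store below is first propagated through
 * pti_set_user_pgtbl().
 */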
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PMDs, we must clear the P-bit first when clearing a page
 * table entry, so clear the bottom half first and enforce ordering with
 * a compiler barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to the Intel application note "TLBs, Paging-Structure
	 * Caches, and Their Invalidation", in PAE mode we would have to
	 * flush the TLB via cr3 whenever a top-level (pgd) entry changes.
	 *
	 * All current callers of pud_clear() either follow this with a
	 * flush_tlb_mm() or do not need a TLB flush at all, so no flush
	 * is done here.
	 */
}
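
/*
 * On SMP the 64-bit entry has to be read and cleared with a single
 * atomic xchg: another CPU's page walker may set the Accessed or Dirty
 * bit at any moment, and a non-atomic read-then-clear could lose that
 * update.  On UP there is no such race, so the cheaper local
 * (non-atomic) variants are used instead.
 */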
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};

#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	/*
	 * If pmd has the present bit cleared we can get away without an
	 * expensive cmpxchg64: we can update pmdp half-by-half without
	 * racing with anybody.
	 */
	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
		union split_pmd old, new, *ptr;

		ptr = (union split_pmd *)pmdp;

		new.pmd = pmd;

		/* xchg acts as a barrier before setting of the high bits */
		old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
		old.pmd_high = ptr->pmd_high;
		ptr->pmd_high = new.pmd_high;
		return old.pmd;
	}

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

	return old;
}
#endif
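
/*
 * Note on pmdp_establish() above: writing a not-present value clears the
 * present bit with the very first (low-half) xchg, so the hardware will
 * no longer walk through this pmd and the high half can then be stored
 * non-atomically.  A present value cannot be split that way, so it is
 * installed with a full 64-bit cmpxchg loop, which also guarantees that
 * Accessed/Dirty bits set concurrently by the CPU show up in the
 * returned old value instead of being lost.
 */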

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
#endif

	/* xchg acts as a barrier before setting of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})

/*
 * Normally, __swp_entry() converts from the arch-independent to the
 * arch-dependent swp_entry_t and __swp_entry_to_pte() just stores the
 * result in the pte.  Here, however, swp_entry_t is 32-bit while the pte
 * is 64-bit and we want to use all of it, so the real arch-dependent
 * conversion is done in __swp_entry_to_pte() and undone again in
 * __pte_to_swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })

/*
 * Analogously, __pte_to_swp_entry() does not just extract the
 * arch-dependent swp_entry_t: it also converts the 64-bit pte back into
 * the 32-bit intermediate representation that __swp_entry() expects.
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))
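
/*
 * Layout sketch for the encoding above, assuming _PAGE_BIT_PROTNONE == 8
 * (so SWP_OFFSET_FIRST_BIT == 9 and SWP_OFFSET_SHIFT == 14):
 *
 *	bits 63..59  swap type
 *	bits 58..9   swap offset, stored bitwise inverted
 *	bits  8..0   zero, so _PAGE_PRESENT and _PAGE_PROTNONE stay clear
 *
 * For example, type = 1 and offset = 2 produce
 *	(~(pteval_t)2 << 14 >> 5) | ((pteval_t)1 << 59)
 * and __pteval_swp_offset() recovers the offset by inverting again with
 * ~pte << 5 >> 14.  Storing the offset inverted keeps a swap pte from
 * looking like a valid physical address (see asm/pgtable-invert.h).
 */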

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */