// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/mm_inline.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
 * above. pmd folding is special and typically pmd_* macros refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update__mmu_cache.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

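/*
 * Generic fallback: test-and-clear the accessed (young) bit and, if it
 * was set, flush the now-stale TLB entry for this address.
 */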
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

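/*
 * Clear the pte and, if the old pte could have been cached by hardware
 * (pte_accessible()), flush the TLB entry. The old pte is returned so
 * callers can propagate its dirty/accessed state.
 */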
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

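/*
 * Generic fallbacks for the transparent hugepage (huge PMD/PUD)
 * variants of the helpers above.
 */
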
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

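/*
 * Huge-PMD counterpart of ptep_clear_flush_young(): clear the accessed
 * bit and flush the whole PMD range if it was set.
 */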
#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

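/*
 * Deposit a preallocated page table page under @pmdp so that it can be
 * withdrawn later, e.g. when splitting the huge PMD. Deposited pages
 * are linked through pmd_huge_pte() and their page->lru lists, under
 * the pmd_lockptr() lock.
 */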
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

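/*
 * Atomically replace the PMD with an invalidated version via
 * pmdp_establish() and flush the TLB range; the old PMD value is
 * returned so the caller can preserve its dirty/accessed bits.
 */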
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

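/*
 * Variant of pmdp_invalidate() for callers that rely on hardware not
 * updating the access/dirty bits behind their back; the generic
 * version simply forwards to pmdp_invalidate().
 */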
#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte format are the same. So we could
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */