// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
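			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * a page is not CPU accessible and thus is mapped as
			 * a special swap entry, but it still counts as a
			 * valid regular mapping of the page (and is accounted
			 * as such in the page's mapcount).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping: take the CPU page table lock and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */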
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

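				/* Handle un-addressable ZONE_DEVICE memory */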
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry) &&
				    !is_device_exclusive_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

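/**
 * check_pte - check if the pfn range of @pvmw is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair of pte and pfn range
 * for checking
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry (or a device-exclusive entry) whose pfn lies in
 * [pvmw->pfn, pvmw->pfn + pvmw->nr_pages).
 *
 * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte maps a pfn
 * in that range, either as a present PTE or as a device-private or
 * device-exclusive swap entry.
 *
 * Otherwise, returns false.
 */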
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

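		/* Handle un-addressable ZONE_DEVICE memory */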
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

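/* Returns true if the two ranges overlap.  Careful to not overflow. */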
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

/* Round up to the next boundary of @size, saturating at ULONG_MAX on wrap */
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

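/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually THP). For a PTE-mapped THP, callers typically run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP,
 * along these lines (sketch of a typical caller):
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		...examine *pvmw.pte or *pvmw.pmd under pvmw.ptl...
 *	}
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd
 * is NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned true,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */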
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

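	/* The only possible pmd mapping has been handled on last iteration */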
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);

		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

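		/* when pud is not present, pte will be NULL */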
		pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
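		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */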
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
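			/* THP pmd was split under us: handle on pte level */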
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
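			/*
			 * If PVMW_SYNC, take and drop the THP pmd lock so
			 * that we cannot return prematurely while
			 * zap_huge_pmd() has cleared *pmd but not yet
			 * decremented the compound mapcount.
			 */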
			if ((pvmw->flags & PVMW_SYNC) &&
			    transhuge_vma_suitable(vma, pvmw->address) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
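			/* Did we cross page table boundary? */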
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}

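/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */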
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}