// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

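/*
 * hmm_pfns_fill() - fill the output pfn array for every page in [addr, end)
 * with @cpu_flags (typically 0 or HMM_PFN_ERROR).
 */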
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

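/*
 * hmm_vma_fault() - fault in a range lacking valid or sufficient pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after faulting the pages in (so the caller retries the
 * walk), -EPERM if a write fault is needed but the vma is not writable, or
 * -EFAULT on page fault error.
 */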
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

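	/*
	 * Consider the range's default flags in addition to the individual
	 * per-page request: the per-page value is first masked with
	 * pfn_flags_mask and then the default_flags are OR'ed in, so a page
	 * can never request less than the range's defaults.
	 */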
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

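	/*
	 * If the default flags do not request to fault pages, and the mask
	 * does not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */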
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

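/*
 * pte_hole callback: called for a range with no page table (or no vma at
 * all). Fault the pages in if requested; otherwise report the range as
 * empty (0), or as HMM_PFN_ERROR when there is no vma backing it.
 */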
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none_mostly(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

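		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */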
		if (is_device_private_entry(entry) &&
		    pfn_swap_entry_to_page(entry)->pgmap->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (is_writable_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = swp_offset(entry) | cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_device_private_entry(entry))
			goto fault;

		if (is_device_exclusive_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

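	/*
	 * Pages without a struct page (vm_normal_page() failed and the pte is
	 * neither devmap nor the zero page) cannot be mirrored: report them
	 * as HMM_PFN_ERROR, unless a fault was requested, in which case this
	 * is a hard failure.
	 */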
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
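		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read pmd value and check again it's a transparent
		 * huge or device mapping one and compute corresponding pfn
		 * values.
		 */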
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

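	/*
	 * We have handled all the valid cases above ie either none, migration,
	 * huge or transparent huge. At this point either it is a valid pmd
	 * entry pointing to a pte directory or it is a bad pmd that will not
	 * recover.
	 */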
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

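	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */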
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the other vmas. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

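/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page
 * tables without mutating them (i.e. causing faults).
 */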
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
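		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to
		 * their output, and all >= are still at their input values.
		 */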
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);