// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/bitops.h>
#include <linux/mmu_notifier.h>
#include <linux/mm_inline.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/**
 * struct wp_walk - Private struct for pagetable walk callbacks
 * @range: Range for mmu notifiers
 * @tlbflush_start: Address of first modified pte
 * @tlbflush_end: Address of last modified pte + 1
 * @total: Total number of modified ptes
 */
struct wp_walk {
    struct mmu_notifier_range range;
    unsigned long tlbflush_start;
    unsigned long tlbflush_end;
    unsigned long total;
};

/**
 * wp_pte - Write-protect a pte
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to protect
 * @end: The end of the virtual address range to protect
 * @walk: pagetable walk callback argument
 *
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
 */
static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
          struct mm_walk *walk)
{
    struct wp_walk *wpwalk = walk->private;
    pte_t ptent = *pte;

    if (pte_write(ptent)) {
        pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

        ptent = pte_wrprotect(old_pte);
        ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
        wpwalk->total++;
        wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
        wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
                       addr + PAGE_SIZE);
    }

    return 0;
}

/**
 * struct clean_walk - Private struct for the clean_record_pte function.
 * @base: struct wp_walk we derive from
 * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
 * @bitmap: Bitmap with one bit for each page offset in the address_space range
 * covered.
 * @start: Address_space page offset of the first modified pte relative
 * to @bitmap_pgoff
 * @end: Address_space page offset of the last modified pte + 1, relative
 * to @bitmap_pgoff
 */
struct clean_walk {
    struct wp_walk base;
    pgoff_t bitmap_pgoff;
    unsigned long *bitmap;
    pgoff_t start;
    pgoff_t end;
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

/**
 * clean_record_pte - Clean a pte and record its address space offset in a
 * bitmap
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to clean
 * @end: The end of the virtual address range to clean
 * @walk: pagetable walk callback argument
 *
 * The function cleans a pte and records the range in
 * virtual address space of touched ptes for efficient TLB flushes.
 * It also records dirty ptes in a bitmap representing page offsets
 * in the address_space, as well as the first and last of the bits
 * touched.
 */
static int clean_record_pte(pte_t *pte, unsigned long addr,
                unsigned long end, struct mm_walk *walk)
{
    struct wp_walk *wpwalk = walk->private;
    struct clean_walk *cwalk = to_clean_walk(wpwalk);
    pte_t ptent = *pte;

    if (pte_dirty(ptent)) {
        pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
            walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
        pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

        ptent = pte_mkclean(old_pte);
        ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

        wpwalk->total++;
        wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
        wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
                       addr + PAGE_SIZE);

        __set_bit(pgoff, cwalk->bitmap);
        cwalk->start = min(cwalk->start, pgoff);
        cwalk->end = max(cwalk->end, pgoff + 1);
    }

    return 0;
}

/*
 * wp_clean_pmd_entry - The pagewalk pmd callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pmd.
 * Furthermore, never split huge pmds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
                  struct mm_walk *walk)
{
    pmd_t pmdval = pmd_read_atomic(pmd);

    if (!pmd_trans_unstable(&pmdval))
        return 0;

    if (pmd_none(pmdval)) {
        walk->action = ACTION_AGAIN;
        return 0;
    }

    /* Huge pmd, present or migrated */
    walk->action = ACTION_CONTINUE;
    if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
        WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));

    return 0;
}

/*
 * wp_clean_pud_entry - The pagewalk pud callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pud.
 * Furthermore, never split huge puds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
                  struct mm_walk *walk)
{
    pud_t pudval = READ_ONCE(*pud);

    if (!pud_trans_unstable(&pudval))
        return 0;

    if (pud_none(pudval)) {
        walk->action = ACTION_AGAIN;
        return 0;
    }

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
    /* Huge pud */
    walk->action = ACTION_CONTINUE;
    if (pud_trans_huge(pudval) || pud_devmap(pudval))
        WARN_ON(pud_write(pudval) || pud_dirty(pudval));
#endif

    return 0;
}

/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
    struct wp_walk *wpwalk = walk->private;

    wpwalk->tlbflush_start = end;
    wpwalk->tlbflush_end = start;

    mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
                walk->vma, walk->mm, start, end);
    mmu_notifier_invalidate_range_start(&wpwalk->range);
    flush_cache_range(walk->vma, start, end);

    /*
     * We're not using tlb_gather_mmu() since typically
     * only a small subrange of PTEs are affected, whereas
     * tlb_gather_mmu() records the full range.
     */
    inc_tlb_flush_pending(walk->mm);

    return 0;
}

/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
    struct wp_walk *wpwalk = walk->private;

    if (mm_tlb_flush_nested(walk->mm))
        flush_tlb_range(walk->vma, wpwalk->range.start,
                wpwalk->range.end);
    else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
        flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
                wpwalk->tlbflush_end);

    mmu_notifier_invalidate_range_end(&wpwalk->range);
    dec_tlb_flush_pending(walk->mm);
}

/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
 */
static int wp_clean_test_walk(unsigned long start, unsigned long end,
                  struct mm_walk *walk)
{
    unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

    /* Skip non-applicable VMAs */
    if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
        (VM_SHARED | VM_MAYWRITE))
        return 1;

    return 0;
}

static const struct mm_walk_ops clean_walk_ops = {
    .pte_entry = clean_record_pte,
    .pmd_entry = wp_clean_pmd_entry,
    .pud_entry = wp_clean_pud_entry,
    .test_walk = wp_clean_test_walk,
    .pre_vma = wp_clean_pre_vma,
    .post_vma = wp_clean_post_vma
};

static const struct mm_walk_ops wp_walk_ops = {
    .pte_entry = wp_pte,
    .pmd_entry = wp_clean_pmd_entry,
    .pud_entry = wp_clean_pud_entry,
    .test_walk = wp_clean_test_walk,
    .pre_vma = wp_clean_pre_vma,
    .post_vma = wp_clean_post_vma
};

/**
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * @mapping: The address_space we want to write protect
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * extended to handle them as well.
 *
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
 */
unsigned long wp_shared_mapping_range(struct address_space *mapping,
                      pgoff_t first_index, pgoff_t nr)
{
    struct wp_walk wpwalk = { .total = 0 };

    i_mmap_lock_read(mapping);
    WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
                  &wpwalk));
    i_mmap_unlock_read(mapping);

    return wpwalk.total;
}
EXPORT_SYMBOL_GPL(wp_shared_mapping_range);

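As a rough illustration of how a caller might use this helper, the sketch below write-protects a range of a shared mapping so that subsequent writes fault and can be intercepted in page_mkwrite()/pfn_mkwrite(). It is not part of this file, and all names in it (example_start_write_notify, obj_mapping, obj_pgoff, obj_npages) are hypothetical.

/*
 * Hypothetical caller sketch: start write-notification on a range of a
 * shared mapping by write-protecting its ptes. All names are made up
 * for illustration only.
 */
static void example_start_write_notify(struct address_space *obj_mapping,
                       pgoff_t obj_pgoff, pgoff_t obj_npages)
{
    unsigned long wrprotected;

    /* After this, writes to the range fault and reach page_mkwrite(). */
    wrprotected = wp_shared_mapping_range(obj_mapping, obj_pgoff, obj_npages);
    pr_debug("write-protected %lu ptes\n", wrprotected);
}
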
/**
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * address space range
 * @mapping: The address_space we want to clean
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 * @bitmap_pgoff: The page offset of the first bit in @bitmap
 * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
 * cover the whole range @first_index..@first_index + @nr.
 * @start: Pointer to the number of the first set bit in @bitmap.
 * The value is modified as new bits are set by the function.
 * @end: Pointer to the number of the last set bit in @bitmap + 1.
 * The value is modified as new bits are set by the function.
 * If *@start >= *@end on entry, the bitmap is treated as having no
 * bits set.
 *
 * Note: When this function returns there is no guarantee that a CPU has
 * not already dirtied new ptes. However it will not clean any ptes not
 * reported in the bitmap. The guarantees are as follows:
 * a) All ptes dirty when the function starts executing will end up recorded
 *    in the bitmap.
 * b) All ptes dirtied after that will either remain dirty, be recorded in the
 *    bitmap or both.
 *
 * If a caller needs to make sure all dirty ptes are picked up and no
 * additional ones are added, it first needs to write-protect the
 * address-space range and make sure new writers are blocked in
 * page_mkwrite() or pfn_mkwrite(). And then after a TLB flush following
 * the write-protection pick up all dirty bits.
 *
 * This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge dirty entries, though, and can easily be extended
 * to handle them as well.
 *
 * Return: The number of dirty ptes actually cleaned.
 */
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
                        pgoff_t first_index, pgoff_t nr,
                        pgoff_t bitmap_pgoff,
                        unsigned long *bitmap,
                        pgoff_t *start,
                        pgoff_t *end)
{
    bool none_set = (*start >= *end);
    struct clean_walk cwalk = {
        .base = { .total = 0 },
        .bitmap_pgoff = bitmap_pgoff,
        .bitmap = bitmap,
        .start = none_set ? nr : *start,
        .end = none_set ? 0 : *end,
    };

    i_mmap_lock_read(mapping);
    WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
                  &cwalk.base));
    i_mmap_unlock_read(mapping);

    *start = cwalk.start;
    *end = cwalk.end;

    return cwalk.base.total;
}
EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);
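
A caller-side sketch of how the bitmap and the in/out @start/@end arguments could be used follows. It assumes the caller passes the same value for @first_index and @bitmap_pgoff, so that bit i corresponds to address_space offset obj_pgoff + i, and that the bitmap was zeroed beforehand. All names (example_harvest_dirty, obj_mapping, obj_pgoff, obj_npages, dirty_bitmap) are hypothetical and not part of this file.

/*
 * Hypothetical caller sketch: harvest dirty pages from a range of a
 * shared mapping into a pre-zeroed bitmap of at least obj_npages bits.
 * All names are made up for illustration only.
 */
static void example_harvest_dirty(struct address_space *obj_mapping,
                  pgoff_t obj_pgoff, pgoff_t obj_npages,
                  unsigned long *dirty_bitmap)
{
    pgoff_t start = obj_npages;    /* start >= end: no bits set yet */
    pgoff_t end = 0;
    unsigned long cleaned;
    pgoff_t i;

    cleaned = clean_record_shared_mapping_range(obj_mapping, obj_pgoff,
                            obj_npages, obj_pgoff,
                            dirty_bitmap, &start, &end);
    pr_debug("cleaned %lu ptes\n", cleaned);

    /* Bits set in [start, end) index pages relative to obj_pgoff. */
    for (i = start; i < end; i++)
        if (test_bit(i, dirty_bitmap))
            pr_debug("page at offset %lu was dirty\n",
                 (unsigned long)(obj_pgoff + i));
}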