/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEWALK_H
#define _LINUX_PAGEWALK_H

#include <linux/mm.h>

struct mm_walk;

/**
 * struct mm_walk_ops - callbacks for walk_page_range
 * @pgd_entry:      if set, called for each non-empty PGD (top-level) entry
 * @p4d_entry:      if set, called for each non-empty P4D entry
 * @pud_entry:      if set, called for each non-empty PUD entry
 * @pmd_entry:      if set, called for each non-empty PMD entry
 *          this handler is required to be able to handle
 *          pmd_trans_huge() pmds.  It may simply choose to call
 *          split_huge_page() instead of handling the huge pmd explicitly.
 * @pte_entry:      if set, called for each non-empty PTE (lowest-level)
 *          entry
 * @pte_hole:       if set, called for each hole at all levels,
 *          depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD,
 *          4:PTE. Any folded depths (where PTRS_PER_P?D is equal
 *          to 1) are skipped.
 * @hugetlb_entry:  if set, called for each hugetlb entry
 * @test_walk:      caller-specific callback function to determine whether
 *          we walk over the current vma or not. Returning 0 means
 *          "do page table walk over the current vma", returning
 *          a negative value means "abort current page table walk
 *          right now" and returning 1 means "skip the current vma"
 * @pre_vma:            if set, called before starting walk on a non-null vma.
 * @post_vma:           if set, called after a walk on a non-null vma, provided
 *                      that @pre_vma and the vma walk succeeded.
 *
 * p?d_entry callbacks are called even if those levels are folded on a
 * particular architecture/configuration.
 */
struct mm_walk_ops {
    int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
             unsigned long next, struct mm_walk *walk);
    int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
             unsigned long next, struct mm_walk *walk);
    int (*pud_entry)(pud_t *pud, unsigned long addr,
             unsigned long next, struct mm_walk *walk);
    int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
             unsigned long next, struct mm_walk *walk);
    int (*pte_entry)(pte_t *pte, unsigned long addr,
             unsigned long next, struct mm_walk *walk);
    int (*pte_hole)(unsigned long addr, unsigned long next,
            int depth, struct mm_walk *walk);
    int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
                 unsigned long addr, unsigned long next,
                 struct mm_walk *walk);
    int (*test_walk)(unsigned long addr, unsigned long next,
            struct mm_walk *walk);
    int (*pre_vma)(unsigned long start, unsigned long end,
               struct mm_walk *walk);
    void (*post_vma)(struct mm_walk *walk);
};
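
/*
 * Example (illustrative sketch, not part of the API above): a minimal
 * walker that counts present PTEs and holes in a range.  The names
 * count_* and struct count_private are hypothetical; each callback
 * returns 0 to keep walking or a negative errno to abort the walk.
 */
struct count_private {
    unsigned long present;
    unsigned long holes;
};

static int count_pte_entry(pte_t *pte, unsigned long addr,
               unsigned long next, struct mm_walk *walk)
{
    struct count_private *cp = walk->private;

    if (pte_present(*pte))
        cp->present++;
    return 0;       /* keep walking */
}

static int count_pte_hole(unsigned long addr, unsigned long next,
              int depth, struct mm_walk *walk)
{
    struct count_private *cp = walk->private;

    cp->holes++;
    return 0;
}

static const struct mm_walk_ops count_ops = {
    .pte_entry  = count_pte_entry,
    .pte_hole   = count_pte_hole,
};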

/*
 * Action for pud_entry / pmd_entry callbacks.
 * ACTION_SUBTREE is the default.
 */
enum page_walk_action {
    /* Descend to next level, splitting huge pages if needed and possible */
    ACTION_SUBTREE = 0,
    /* Continue to next entry at this level (ignoring any subtree) */
    ACTION_CONTINUE = 1,
    /* Call again for this entry */
    ACTION_AGAIN = 2
};
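
/*
 * Example (sketch): a hypothetical pmd_entry callback that handles a
 * transparent huge PMD at this level and sets ACTION_CONTINUE so the
 * core walker does not descend into (or split) the subtree.  Leaving
 * walk->action at ACTION_SUBTREE, the default, would descend to the
 * PTE level instead.  Locking of the pmd is elided here for brevity.
 */
static int demo_pmd_entry(pmd_t *pmd, unsigned long addr,
              unsigned long next, struct mm_walk *walk)
{
    if (pmd_trans_huge(*pmd)) {
        /* inspect or account the whole huge mapping here ... */
        walk->action = ACTION_CONTINUE; /* skip the PTE subtree */
    }
    return 0;
}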

/**
 * struct mm_walk - walk_page_range data
 * @ops:    operation to call during the walk
 * @mm:     mm_struct representing the target process of page table walk
 * @pgd:    pointer to PGD; only valid with no_vma (otherwise set to NULL)
 * @vma:    vma currently walked (NULL if walking outside vmas)
 * @action: next action to perform (see enum page_walk_action)
 * @no_vma: walk ignoring vmas (vma will always be NULL)
 * @private:    private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
    const struct mm_walk_ops *ops;
    struct mm_struct *mm;
    pgd_t *pgd;
    struct vm_area_struct *vma;
    enum page_walk_action action;
    bool no_vma;
    void *private;
};
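
/*
 * Example (sketch): a hypothetical test_walk callback reading the
 * per-walk state above.  It skips file-backed VMAs: returning 1 skips
 * the current vma, 0 walks it, and a negative value aborts the walk.
 */
static int demo_test_walk(unsigned long start, unsigned long end,
              struct mm_walk *walk)
{
    if (!vma_is_anonymous(walk->vma))
        return 1;   /* skip this vma, continue with the next one */
    return 0;       /* walk the page tables of this vma */
}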

int walk_page_range(struct mm_struct *mm, unsigned long start,
        unsigned long end, const struct mm_walk_ops *ops,
        void *private);
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
              unsigned long end, const struct mm_walk_ops *ops,
              pgd_t *pgd,
              void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
        void *private);
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
              pgoff_t nr, const struct mm_walk_ops *ops,
              void *private);
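
/*
 * Example (sketch): driving a walk over part of a process address space
 * with the count_ops defined above.  walk_page_range() expects the
 * caller to hold the mmap lock; mmap_read_lock()/mmap_read_unlock() are
 * assumed here (older kernels take mmap_sem directly).
 */
static long demo_count_present(struct mm_struct *mm, unsigned long start,
                   unsigned long end)
{
    struct count_private cp = { 0 };
    int err;

    mmap_read_lock(mm);
    err = walk_page_range(mm, start, end, &count_ops, &cp);
    mmap_read_unlock(mm);

    return err ? err : (long)cp.present;
}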

#endif /* _LINUX_PAGEWALK_H */