Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_MMU_NOTIFIER_H
0003 #define _LINUX_MMU_NOTIFIER_H
0004 
0005 #include <linux/list.h>
0006 #include <linux/spinlock.h>
0007 #include <linux/mm_types.h>
0008 #include <linux/mmap_lock.h>
0009 #include <linux/srcu.h>
0010 #include <linux/interval_tree.h>
0011 
0012 struct mmu_notifier_subscriptions;
0013 struct mmu_notifier;
0014 struct mmu_notifier_range;
0015 struct mmu_interval_notifier;
0016 
0017 /**
0018  * enum mmu_notifier_event - reason for the mmu notifier callback
0019  * @MMU_NOTIFY_UNMAP: either munmap() that unmap the range or a mremap() that
0020  * move the range
0021  *
0022  * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
0023  * madvise() or replacing a page by another one, ...).
0024  *
0025  * @MMU_NOTIFY_PROTECTION_VMA: update is due to protection change for the range
0026  * ie using the vma access permission (vm_page_prot) to update the whole range
0027  * is enough no need to inspect changes to the CPU page table (mprotect()
0028  * syscall)
0029  *
0030  * @MMU_NOTIFY_PROTECTION_PAGE: update is due to change in read/write flag for
0031  * pages in the range so to mirror those changes the user must inspect the CPU
0032  * page table (from the end callback).
0033  *
0034  * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
0035  * access flags). User should soft dirty the page in the end callback to make
0036  * sure that anyone relying on soft dirtiness catch pages that might be written
0037  * through non CPU mappings.
0038  *
0039  * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
0040  * that the mm refcount is zero and the range is no longer accessible.
0041  *
0042  * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
0043  * a device driver to possibly ignore the invalidation if the
0044  * owner field matches the driver's device private pgmap owner.
0045  *
0046  * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
0047  * longer have exclusive access to the page. When sent during creation of an
0048  * exclusive range the owner will be initialised to the value provided by the
0049  * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
0050  */
0051 enum mmu_notifier_event {
0052     MMU_NOTIFY_UNMAP = 0,
0053     MMU_NOTIFY_CLEAR,
0054     MMU_NOTIFY_PROTECTION_VMA,
0055     MMU_NOTIFY_PROTECTION_PAGE,
0056     MMU_NOTIFY_SOFT_DIRTY,
0057     MMU_NOTIFY_RELEASE,
0058     MMU_NOTIFY_MIGRATE,
0059     MMU_NOTIFY_EXCLUSIVE,
0060 };
0061 
0062 #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
0063 
0064 struct mmu_notifier_ops {
0065     /*
0066      * Called either by mmu_notifier_unregister or when the mm is
0067      * being destroyed by exit_mmap, always before all pages are
0068      * freed. This can run concurrently with other mmu notifier
0069      * methods (the ones invoked outside the mm context) and it
0070      * should tear down all secondary mmu mappings and freeze the
0071      * secondary mmu. If this method isn't implemented you've to
0072      * be sure that nothing could possibly write to the pages
0073      * through the secondary mmu by the time the last thread with
0074      * tsk->mm == mm exits.
0075      *
0076      * As side note: the pages freed after ->release returns could
0077      * be immediately reallocated by the gart at an alias physical
0078      * address with a different cache model, so if ->release isn't
0079      * implemented because all _software_ driven memory accesses
0080      * through the secondary mmu are terminated by the time the
0081      * last thread of this mm quits, you've also to be sure that
0082      * speculative _hardware_ operations can't allocate dirty
0083      * cachelines in the cpu that could not be snooped and made
0084      * coherent with the other read and write operations happening
0085      * through the gart alias address, so leading to memory
0086      * corruption.
0087      */
0088     void (*release)(struct mmu_notifier *subscription,
0089             struct mm_struct *mm);
0090 
0091     /*
0092      * clear_flush_young is called after the VM is
0093      * test-and-clearing the young/accessed bitflag in the
0094      * pte. This way the VM will provide proper aging to the
0095      * accesses to the page through the secondary MMUs and not
0096      * only to the ones through the Linux pte.
0097      * Start-end is necessary in case the secondary MMU is mapping the page
0098      * at a smaller granularity than the primary MMU.
0099      */
0100     int (*clear_flush_young)(struct mmu_notifier *subscription,
0101                  struct mm_struct *mm,
0102                  unsigned long start,
0103                  unsigned long end);
0104 
0105     /*
0106      * clear_young is a lightweight version of clear_flush_young. Like the
0107      * latter, it is supposed to test-and-clear the young/accessed bitflag
0108      * in the secondary pte, but it may omit flushing the secondary tlb.
0109      */
0110     int (*clear_young)(struct mmu_notifier *subscription,
0111                struct mm_struct *mm,
0112                unsigned long start,
0113                unsigned long end);
0114 
0115     /*
0116      * test_young is called to check the young/accessed bitflag in
0117      * the secondary pte. This is used to know if the page is
0118      * frequently used without actually clearing the flag or tearing
0119      * down the secondary mapping on the page.
0120      */
0121     int (*test_young)(struct mmu_notifier *subscription,
0122               struct mm_struct *mm,
0123               unsigned long address);
0124 
0125     /*
0126      * change_pte is called in cases that pte mapping to page is changed:
0127      * for example, when ksm remaps pte to point to a new shared page.
0128      */
0129     void (*change_pte)(struct mmu_notifier *subscription,
0130                struct mm_struct *mm,
0131                unsigned long address,
0132                pte_t pte);
0133 
0134     /*
0135      * invalidate_range_start() and invalidate_range_end() must be
0136      * paired and are called only when the mmap_lock and/or the
0137      * locks protecting the reverse maps are held. If the subsystem
0138      * can't guarantee that no additional references are taken to
0139      * the pages in the range, it has to implement the
0140      * invalidate_range() notifier to remove any references taken
0141      * after invalidate_range_start().
0142      *
0143      * Invalidation of multiple concurrent ranges may be
0144      * optionally permitted by the driver. Either way the
0145      * establishment of sptes is forbidden in the range passed to
0146      * invalidate_range_begin/end for the whole duration of the
0147      * invalidate_range_begin/end critical section.
0148      *
0149      * invalidate_range_start() is called when all pages in the
0150      * range are still mapped and have at least a refcount of one.
0151      *
0152      * invalidate_range_end() is called when all pages in the
0153      * range have been unmapped and the pages have been freed by
0154      * the VM.
0155      *
0156      * The VM will remove the page table entries and potentially
0157      * the page between invalidate_range_start() and
0158      * invalidate_range_end(). If the page must not be freed
0159      * because of pending I/O or other circumstances then the
0160      * invalidate_range_start() callback (or the initial mapping
0161      * by the driver) must make sure that the refcount is kept
0162      * elevated.
0163      *
0164      * If the driver increases the refcount when the pages are
0165      * initially mapped into an address space then either
0166      * invalidate_range_start() or invalidate_range_end() may
0167      * decrease the refcount. If the refcount is decreased on
0168      * invalidate_range_start() then the VM can free pages as page
0169      * table entries are removed.  If the refcount is only
0170      * dropped on invalidate_range_end() then the driver itself
0171      * will drop the last refcount but it must take care to flush
0172      * any secondary tlb before doing the final free on the
0173      * page. Pages will no longer be referenced by the linux
0174      * address space but may still be referenced by sptes until
0175      * the last refcount is dropped.
0176      *
0177      * If blockable argument is set to false then the callback cannot
0178      * sleep and has to return with -EAGAIN if sleeping would be required.
0179      * 0 should be returned otherwise. Please note that notifiers that can
0180      * fail invalidate_range_start are not allowed to implement
0181      * invalidate_range_end, as there is no mechanism for informing the
0182      * notifier that its start failed.
0183      */
0184     int (*invalidate_range_start)(struct mmu_notifier *subscription,
0185                       const struct mmu_notifier_range *range);
0186     void (*invalidate_range_end)(struct mmu_notifier *subscription,
0187                      const struct mmu_notifier_range *range);
0188 
0189     /*
0190      * invalidate_range() is either called between
0191      * invalidate_range_start() and invalidate_range_end() when the
0192      * VM has to free pages that where unmapped, but before the
0193      * pages are actually freed, or outside of _start()/_end() when
0194      * a (remote) TLB is necessary.
0195      *
0196      * If invalidate_range() is used to manage a non-CPU TLB with
0197      * shared page-tables, it not necessary to implement the
0198      * invalidate_range_start()/end() notifiers, as
0199      * invalidate_range() already catches the points in time when an
0200      * external TLB range needs to be flushed. For more in depth
0201      * discussion on this see Documentation/mm/mmu_notifier.rst
0202      *
0203      * Note that this function might be called with just a sub-range
0204      * of what was passed to invalidate_range_start()/end(), if
0205      * called between those functions.
0206      */
0207     void (*invalidate_range)(struct mmu_notifier *subscription,
0208                  struct mm_struct *mm,
0209                  unsigned long start,
0210                  unsigned long end);
0211 
0212     /*
0213      * These callbacks are used with the get/put interface to manage the
0214      * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
0215      * notifier for use with the mm.
0216      *
0217      * free_notifier() is only called after the mmu_notifier has been
0218      * fully put, calls to any ops callback are prevented and no ops
0219      * callbacks are currently running. It is called from a SRCU callback
0220      * and cannot sleep.
0221      */
0222     struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
0223     void (*free_notifier)(struct mmu_notifier *subscription);
0224 };
0225 
0226 /*
0227  * The notifier chains are protected by mmap_lock and/or the reverse map
0228  * semaphores. Notifier chains are only changed when all reverse maps and
0229  * the mmap_lock locks are taken.
0230  *
0231  * Therefore notifier chains can only be traversed when either
0232  *
0233  * 1. mmap_lock is held.
0234  * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
0235  * 3. No other concurrent thread can access the list (release)
0236  */
0237 struct mmu_notifier {
0238     struct hlist_node hlist;
0239     const struct mmu_notifier_ops *ops;
0240     struct mm_struct *mm;
0241     struct rcu_head rcu;
0242     unsigned int users;
0243 };
0244 
0245 /**
0246  * struct mmu_interval_notifier_ops
0247  * @invalidate: Upon return the caller must stop using any SPTEs within this
0248  *              range. This function can sleep. Return false only if sleeping
0249  *              was required but mmu_notifier_range_blockable(range) is false.
0250  */
0251 struct mmu_interval_notifier_ops {
0252     bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
0253                const struct mmu_notifier_range *range,
0254                unsigned long cur_seq);
0255 };
0256 
0257 struct mmu_interval_notifier {
0258     struct interval_tree_node interval_tree;
0259     const struct mmu_interval_notifier_ops *ops;
0260     struct mm_struct *mm;
0261     struct hlist_node deferred_item;
0262     unsigned long invalidate_seq;
0263 };
0264 
0265 #ifdef CONFIG_MMU_NOTIFIER
0266 
0267 #ifdef CONFIG_LOCKDEP
0268 extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
0269 #endif
0270 
0271 struct mmu_notifier_range {
0272     struct vm_area_struct *vma;
0273     struct mm_struct *mm;
0274     unsigned long start;
0275     unsigned long end;
0276     unsigned flags;
0277     enum mmu_notifier_event event;
0278     void *owner;
0279 };
0280 
0281 static inline int mm_has_notifiers(struct mm_struct *mm)
0282 {
0283     return unlikely(mm->notifier_subscriptions);
0284 }
0285 
0286 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
0287                          struct mm_struct *mm);
0288 static inline struct mmu_notifier *
0289 mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
0290 {
0291     struct mmu_notifier *ret;
0292 
0293     mmap_write_lock(mm);
0294     ret = mmu_notifier_get_locked(ops, mm);
0295     mmap_write_unlock(mm);
0296     return ret;
0297 }
0298 void mmu_notifier_put(struct mmu_notifier *subscription);
0299 void mmu_notifier_synchronize(void);
0300 
0301 extern int mmu_notifier_register(struct mmu_notifier *subscription,
0302                  struct mm_struct *mm);
0303 extern int __mmu_notifier_register(struct mmu_notifier *subscription,
0304                    struct mm_struct *mm);
0305 extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
0306                     struct mm_struct *mm);
0307 
0308 unsigned long
0309 mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
0310 int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
0311                  struct mm_struct *mm, unsigned long start,
0312                  unsigned long length,
0313                  const struct mmu_interval_notifier_ops *ops);
0314 int mmu_interval_notifier_insert_locked(
0315     struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
0316     unsigned long start, unsigned long length,
0317     const struct mmu_interval_notifier_ops *ops);
0318 void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
0319 
0320 /**
0321  * mmu_interval_set_seq - Save the invalidation sequence
0322  * @interval_sub - The subscription passed to invalidate
0323  * @cur_seq - The cur_seq passed to the invalidate() callback
0324  *
0325  * This must be called unconditionally from the invalidate callback of a
0326  * struct mmu_interval_notifier_ops under the same lock that is used to call
0327  * mmu_interval_read_retry(). It updates the sequence number for later use by
0328  * mmu_interval_read_retry(). The provided cur_seq will always be odd.
0329  *
0330  * If the caller does not call mmu_interval_read_begin() or
0331  * mmu_interval_read_retry() then this call is not required.
0332  */
0333 static inline void
0334 mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
0335              unsigned long cur_seq)
0336 {
0337     WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
0338 }
0339 
0340 /**
0341  * mmu_interval_read_retry - End a read side critical section against a VA range
0342  * interval_sub: The subscription
0343  * seq: The return of the paired mmu_interval_read_begin()
0344  *
0345  * This MUST be called under a user provided lock that is also held
0346  * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
0347  *
0348  * Each call should be paired with a single mmu_interval_read_begin() and
0349  * should be used to conclude the read side.
0350  *
0351  * Returns true if an invalidation collided with this critical section, and
0352  * the caller should retry.
0353  */
0354 static inline bool
0355 mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
0356             unsigned long seq)
0357 {
0358     return interval_sub->invalidate_seq != seq;
0359 }
0360 
0361 /**
0362  * mmu_interval_check_retry - Test if a collision has occurred
0363  * interval_sub: The subscription
0364  * seq: The return of the matching mmu_interval_read_begin()
0365  *
0366  * This can be used in the critical section between mmu_interval_read_begin()
0367  * and mmu_interval_read_retry().  A return of true indicates an invalidation
0368  * has collided with this critical region and a future
0369  * mmu_interval_read_retry() will return true.
0370  *
0371  * False is not reliable and only suggests a collision may not have
0372  * occurred. It can be called many times and does not have to hold the user
0373  * provided lock.
0374  *
0375  * This call can be used as part of loops and other expensive operations to
0376  * expedite a retry.
0377  */
0378 static inline bool
0379 mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
0380              unsigned long seq)
0381 {
0382     /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
0383     return READ_ONCE(interval_sub->invalidate_seq) != seq;
0384 }
0385 
0386 extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
0387 extern void __mmu_notifier_release(struct mm_struct *mm);
0388 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
0389                       unsigned long start,
0390                       unsigned long end);
0391 extern int __mmu_notifier_clear_young(struct mm_struct *mm,
0392                       unsigned long start,
0393                       unsigned long end);
0394 extern int __mmu_notifier_test_young(struct mm_struct *mm,
0395                      unsigned long address);
0396 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
0397                       unsigned long address, pte_t pte);
0398 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
0399 extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
0400                   bool only_end);
0401 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
0402                   unsigned long start, unsigned long end);
0403 extern bool
0404 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
0405 
0406 static inline bool
0407 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
0408 {
0409     return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
0410 }
0411 
0412 static inline void mmu_notifier_release(struct mm_struct *mm)
0413 {
0414     if (mm_has_notifiers(mm))
0415         __mmu_notifier_release(mm);
0416 }
0417 
0418 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
0419                       unsigned long start,
0420                       unsigned long end)
0421 {
0422     if (mm_has_notifiers(mm))
0423         return __mmu_notifier_clear_flush_young(mm, start, end);
0424     return 0;
0425 }
0426 
0427 static inline int mmu_notifier_clear_young(struct mm_struct *mm,
0428                        unsigned long start,
0429                        unsigned long end)
0430 {
0431     if (mm_has_notifiers(mm))
0432         return __mmu_notifier_clear_young(mm, start, end);
0433     return 0;
0434 }
0435 
0436 static inline int mmu_notifier_test_young(struct mm_struct *mm,
0437                       unsigned long address)
0438 {
0439     if (mm_has_notifiers(mm))
0440         return __mmu_notifier_test_young(mm, address);
0441     return 0;
0442 }
0443 
0444 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
0445                        unsigned long address, pte_t pte)
0446 {
0447     if (mm_has_notifiers(mm))
0448         __mmu_notifier_change_pte(mm, address, pte);
0449 }
0450 
0451 static inline void
0452 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
0453 {
0454     might_sleep();
0455 
0456     lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
0457     if (mm_has_notifiers(range->mm)) {
0458         range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
0459         __mmu_notifier_invalidate_range_start(range);
0460     }
0461     lock_map_release(&__mmu_notifier_invalidate_range_start_map);
0462 }
0463 
0464 static inline int
0465 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
0466 {
0467     int ret = 0;
0468 
0469     lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
0470     if (mm_has_notifiers(range->mm)) {
0471         range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
0472         ret = __mmu_notifier_invalidate_range_start(range);
0473     }
0474     lock_map_release(&__mmu_notifier_invalidate_range_start_map);
0475     return ret;
0476 }
0477 
0478 static inline void
0479 mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
0480 {
0481     if (mmu_notifier_range_blockable(range))
0482         might_sleep();
0483 
0484     if (mm_has_notifiers(range->mm))
0485         __mmu_notifier_invalidate_range_end(range, false);
0486 }
0487 
0488 static inline void
0489 mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
0490 {
0491     if (mm_has_notifiers(range->mm))
0492         __mmu_notifier_invalidate_range_end(range, true);
0493 }
0494 
0495 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
0496                   unsigned long start, unsigned long end)
0497 {
0498     if (mm_has_notifiers(mm))
0499         __mmu_notifier_invalidate_range(mm, start, end);
0500 }
0501 
0502 static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
0503 {
0504     mm->notifier_subscriptions = NULL;
0505 }
0506 
0507 static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
0508 {
0509     if (mm_has_notifiers(mm))
0510         __mmu_notifier_subscriptions_destroy(mm);
0511 }
0512 
0513 
0514 static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
0515                        enum mmu_notifier_event event,
0516                        unsigned flags,
0517                        struct vm_area_struct *vma,
0518                        struct mm_struct *mm,
0519                        unsigned long start,
0520                        unsigned long end)
0521 {
0522     range->vma = vma;
0523     range->event = event;
0524     range->mm = mm;
0525     range->start = start;
0526     range->end = end;
0527     range->flags = flags;
0528 }
0529 
0530 static inline void mmu_notifier_range_init_owner(
0531             struct mmu_notifier_range *range,
0532             enum mmu_notifier_event event, unsigned int flags,
0533             struct vm_area_struct *vma, struct mm_struct *mm,
0534             unsigned long start, unsigned long end, void *owner)
0535 {
0536     mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
0537     range->owner = owner;
0538 }
0539 
0540 #define ptep_clear_flush_young_notify(__vma, __address, __ptep)     \
0541 ({                                  \
0542     int __young;                            \
0543     struct vm_area_struct *___vma = __vma;              \
0544     unsigned long ___address = __address;               \
0545     __young = ptep_clear_flush_young(___vma, ___address, __ptep);   \
0546     __young |= mmu_notifier_clear_flush_young(___vma->vm_mm,    \
0547                           ___address,       \
0548                           ___address +      \
0549                             PAGE_SIZE); \
0550     __young;                            \
0551 })
0552 
0553 #define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)     \
0554 ({                                  \
0555     int __young;                            \
0556     struct vm_area_struct *___vma = __vma;              \
0557     unsigned long ___address = __address;               \
0558     __young = pmdp_clear_flush_young(___vma, ___address, __pmdp);   \
0559     __young |= mmu_notifier_clear_flush_young(___vma->vm_mm,    \
0560                           ___address,       \
0561                           ___address +      \
0562                             PMD_SIZE);  \
0563     __young;                            \
0564 })
0565 
0566 #define ptep_clear_young_notify(__vma, __address, __ptep)       \
0567 ({                                  \
0568     int __young;                            \
0569     struct vm_area_struct *___vma = __vma;              \
0570     unsigned long ___address = __address;               \
0571     __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
0572     __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,  \
0573                         ___address + PAGE_SIZE);    \
0574     __young;                            \
0575 })
0576 
0577 #define pmdp_clear_young_notify(__vma, __address, __pmdp)       \
0578 ({                                  \
0579     int __young;                            \
0580     struct vm_area_struct *___vma = __vma;              \
0581     unsigned long ___address = __address;               \
0582     __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
0583     __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,  \
0584                         ___address + PMD_SIZE); \
0585     __young;                            \
0586 })
0587 
0588 #define ptep_clear_flush_notify(__vma, __address, __ptep)       \
0589 ({                                  \
0590     unsigned long ___addr = __address & PAGE_MASK;          \
0591     struct mm_struct *___mm = (__vma)->vm_mm;           \
0592     pte_t ___pte;                           \
0593                                     \
0594     ___pte = ptep_clear_flush(__vma, __address, __ptep);        \
0595     mmu_notifier_invalidate_range(___mm, ___addr,           \
0596                     ___addr + PAGE_SIZE);       \
0597                                     \
0598     ___pte;                             \
0599 })
0600 
0601 #define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)     \
0602 ({                                  \
0603     unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;      \
0604     struct mm_struct *___mm = (__vma)->vm_mm;           \
0605     pmd_t ___pmd;                           \
0606                                     \
0607     ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);      \
0608     mmu_notifier_invalidate_range(___mm, ___haddr,          \
0609                       ___haddr + HPAGE_PMD_SIZE);   \
0610                                     \
0611     ___pmd;                             \
0612 })
0613 
0614 #define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)     \
0615 ({                                  \
0616     unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;      \
0617     struct mm_struct *___mm = (__vma)->vm_mm;           \
0618     pud_t ___pud;                           \
0619                                     \
0620     ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);      \
0621     mmu_notifier_invalidate_range(___mm, ___haddr,          \
0622                       ___haddr + HPAGE_PUD_SIZE);   \
0623                                     \
0624     ___pud;                             \
0625 })
0626 
0627 /*
0628  * set_pte_at_notify() sets the pte _after_ running the notifier.
0629  * This is safe to start by updating the secondary MMUs, because the primary MMU
0630  * pte invalidate must have already happened with a ptep_clear_flush() before
0631  * set_pte_at_notify() has been invoked.  Updating the secondary MMUs first is
0632  * required when we change both the protection of the mapping from read-only to
0633  * read-write and the pfn (like during copy on write page faults). Otherwise the
0634  * old page would remain mapped readonly in the secondary MMUs after the new
0635  * page is already writable by some CPU through the primary MMU.
0636  */
0637 #define set_pte_at_notify(__mm, __address, __ptep, __pte)       \
0638 ({                                  \
0639     struct mm_struct *___mm = __mm;                 \
0640     unsigned long ___address = __address;               \
0641     pte_t ___pte = __pte;                       \
0642                                     \
0643     mmu_notifier_change_pte(___mm, ___address, ___pte);     \
0644     set_pte_at(___mm, ___address, __ptep, ___pte);          \
0645 })
0646 
0647 #else /* CONFIG_MMU_NOTIFIER */
0648 
0649 struct mmu_notifier_range {
0650     unsigned long start;
0651     unsigned long end;
0652 };
0653 
0654 static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
0655                         unsigned long start,
0656                         unsigned long end)
0657 {
0658     range->start = start;
0659     range->end = end;
0660 }
0661 
0662 #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
0663     _mmu_notifier_range_init(range, start, end)
0664 #define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
0665                     end, owner) \
0666     _mmu_notifier_range_init(range, start, end)
0667 
0668 static inline bool
0669 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
0670 {
0671     return true;
0672 }
0673 
0674 static inline int mm_has_notifiers(struct mm_struct *mm)
0675 {
0676     return 0;
0677 }
0678 
0679 static inline void mmu_notifier_release(struct mm_struct *mm)
0680 {
0681 }
0682 
0683 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
0684                       unsigned long start,
0685                       unsigned long end)
0686 {
0687     return 0;
0688 }
0689 
0690 static inline int mmu_notifier_test_young(struct mm_struct *mm,
0691                       unsigned long address)
0692 {
0693     return 0;
0694 }
0695 
0696 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
0697                        unsigned long address, pte_t pte)
0698 {
0699 }
0700 
0701 static inline void
0702 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
0703 {
0704 }
0705 
0706 static inline int
0707 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
0708 {
0709     return 0;
0710 }
0711 
0712 static inline
0713 void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
0714 {
0715 }
0716 
0717 static inline void
0718 mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
0719 {
0720 }
0721 
0722 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
0723                   unsigned long start, unsigned long end)
0724 {
0725 }
0726 
0727 static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
0728 {
0729 }
0730 
0731 static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
0732 {
0733 }
0734 
0735 #define mmu_notifier_range_update_to_read_only(r) false
0736 
0737 #define ptep_clear_flush_young_notify ptep_clear_flush_young
0738 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
0739 #define ptep_clear_young_notify ptep_test_and_clear_young
0740 #define pmdp_clear_young_notify pmdp_test_and_clear_young
0741 #define ptep_clear_flush_notify ptep_clear_flush
0742 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
0743 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
0744 #define set_pte_at_notify set_pte_at
0745 
0746 static inline void mmu_notifier_synchronize(void)
0747 {
0748 }
0749 
0750 #endif /* CONFIG_MMU_NOTIFIER */
0751 
0752 #endif /* _LINUX_MMU_NOTIFIER_H */