/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either munmap() that unmaps the range or a mremap() that
 * moves the range
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to protection change for the range
 * ie using the vma access permission (vm_page_prot) to update the whole range
 * is enough, no need to inspect changes to the CPU page table (mprotect()
 * syscall)
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to change in read/write flag for
 * pages in the range so to mirror those changes the user must inspect the CPU
 * page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
 * access flags). User should soft dirty the page in the end callback to make
 * sure that anyone relying on soft dirty tracking sees those pages correctly.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * a device driver that it can ignore the invalidation if the owner field
 * matches the driver's device private pgmap owner.
 *
 * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
 * longer have exclusive access to the page. When sent during creation of an
 * exclusive range the owner will be initialised to the value provided by the
 * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
 */
enum mmu_notifier_event {
        MMU_NOTIFY_UNMAP = 0,
        MMU_NOTIFY_CLEAR,
        MMU_NOTIFY_PROTECTION_VMA,
        MMU_NOTIFY_PROTECTION_PAGE,
        MMU_NOTIFY_SOFT_DIRTY,
        MMU_NOTIFY_RELEASE,
        MMU_NOTIFY_MIGRATE,
        MMU_NOTIFY_EXCLUSIVE,
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
        /*
         * release() is called either by mmu_notifier_unregister() or when
         * the mm is being destroyed by exit_mmap(), always before all pages
         * are freed. This can run concurrently with other mmu notifier
         * methods (the ones invoked outside the mm context) and it should
         * tear down all secondary mmu mappings and freeze the secondary mmu.
         * If this method isn't implemented the driver must make sure that
         * none of its secondary ptes can outlive the primary mapping they
         * mirror.
         */
        void (*release)(struct mmu_notifier *subscription,
                        struct mm_struct *mm);

        /*
         * clear_flush_young is called after the VM is test-and-clearing the
         * young/accessed bitflag in the pte. This way the VM will provide
         * proper aging for accesses to the page through the secondary MMUs
         * and not only for the ones through the Linux pte.
         * Start-end is necessary in case the secondary MMU is mapping the
         * page at a smaller granularity than the primary MMU.
         */
        int (*clear_flush_young)(struct mmu_notifier *subscription,
                                 struct mm_struct *mm,
                                 unsigned long start,
                                 unsigned long end);

        /*
         * clear_young is a lightweight version of clear_flush_young. Like the
         * latter, it is supposed to test-and-clear the young/accessed bitflag
         * in the secondary pte, but it may omit flushing the secondary tlb.
         */
        int (*clear_young)(struct mmu_notifier *subscription,
                           struct mm_struct *mm,
                           unsigned long start,
                           unsigned long end);

        /*
         * test_young is called to check the young/accessed bitflag in the
         * secondary pte. This is used to know if the page is frequently used
         * without actually clearing the flag or tearing down the secondary
         * mapping on the page.
         */
        int (*test_young)(struct mmu_notifier *subscription,
                          struct mm_struct *mm,
                          unsigned long address);

        /*
         * change_pte is called in cases that the pte mapping to a page is
         * changed: for example, when ksm remaps the pte to point to a new
         * shared page.
         */
        void (*change_pte)(struct mmu_notifier *subscription,
                           struct mm_struct *mm,
                           unsigned long address,
                           pte_t pte);

        /*
         * invalidate_range_start() and invalidate_range_end() must be
         * paired and are called only when the mmap_lock and/or the
         * locks protecting the reverse maps are held. If the subsystem
         * can't guarantee that no additional references are taken to
         * the pages in the range, it has to implement the
         * invalidate_range() notifier to remove any references taken
         * after invalidate_range_start().
         *
         * Invalidation of multiple concurrent ranges may be
         * optionally permitted by the driver. Either way the
         * establishment of sptes is forbidden in the range passed to
         * invalidate_range_start/end for the whole duration of the
         * invalidate_range_start/end critical section.
         *
         * invalidate_range_start() is called when all pages in the
         * range are still mapped and have at least a refcount of one.
         *
         * invalidate_range_end() is called when all pages in the
         * range have been unmapped and the pages have been freed by
         * the VM.
         *
         * The VM will remove the page table entries and potentially
         * the page between invalidate_range_start() and
         * invalidate_range_end(). If the page must not be freed
         * because of pending I/O or other circumstances then the
         * invalidate_range_start() callback (or the initial mapping
         * by the driver) must make sure that the refcount is kept
         * elevated.
         *
         * If the driver increases the refcount when the pages are
         * initially mapped into an address space then either
         * invalidate_range_start() or invalidate_range_end() may
         * decrease the refcount. If the refcount is decreased on
         * invalidate_range_start() then the VM can free pages as page
         * table entries are removed. If the refcount is only dropped
         * on invalidate_range_end() then the driver itself will drop
         * the last refcount, but it must take care to flush any
         * secondary tlb before doing the final free on the page.
         * Pages will no longer be referenced by the linux address
         * space but may still be referenced by sptes until the last
         * refcount is dropped.
         *
         * If the MMU_NOTIFIER_RANGE_BLOCKABLE flag is not set in
         * range->flags then the callback cannot sleep and has to return
         * -EAGAIN if sleeping would be required; 0 should be returned
         * otherwise. Please note that notifiers that can fail
         * invalidate_range_start are not allowed to implement
         * invalidate_range_end, as there is no mechanism for informing
         * the notifier that its start failed.
         */
        int (*invalidate_range_start)(struct mmu_notifier *subscription,
                                      const struct mmu_notifier_range *range);
        void (*invalidate_range_end)(struct mmu_notifier *subscription,
                                     const struct mmu_notifier_range *range);

        /*
         * invalidate_range() is either called between
         * invalidate_range_start() and invalidate_range_end() when the
         * VM has to free pages that were unmapped, but before the
         * pages are actually freed, or outside of _start()/_end() when
         * a (remote) TLB flush is necessary.
         *
         * If invalidate_range() is used to manage a non-CPU TLB with
         * shared page-tables, it is not necessary to implement the
         * invalidate_range_start()/end() notifiers, as invalidate_range()
         * is called under the same lock that is used to clear the CPU
         * page table entry, so the secondary TLB is flushed before any
         * other thread can observe the stale entry.
         *
         * Note that this callback may be invoked for just a sub-range of
         * what was passed to invalidate_range_start()/end() when it is
         * called between those two callbacks.
         */
        void (*invalidate_range)(struct mmu_notifier *subscription,
                                 struct mm_struct *mm,
                                 unsigned long start,
                                 unsigned long end);

        /*
         * These callbacks are used with the get/put interface to manage the
         * lifetime of the mmu_notifier memory. alloc_notifier() returns a
         * new notifier for use with the mm.
         *
         * free_notifier() is only called after the last possible call to
         * any other callback has completed, so the driver can release any
         * resources the callbacks might still be using.
         */
        struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
        void (*free_notifier)(struct mmu_notifier *subscription);
};
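
/*
 * Example (a sketch, not taken from any in-tree driver): a minimal
 * subscription that only mirrors invalidations. The names my_mirror,
 * my_mirror_unmap and the mirror lock are hypothetical; a real
 * implementation would shoot down its device TLB entries for
 * [range->start, range->end) before returning:
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *subscription,
 *				const struct mmu_notifier_range *range)
 *	{
 *		struct my_mirror *mirror =
 *			container_of(subscription, struct my_mirror, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return -EAGAIN;
 *		mutex_lock(&mirror->lock);
 *		my_mirror_unmap(mirror, range->start, range->end);
 *		mutex_unlock(&mirror->lock);
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops my_mirror_ops = {
 *		.invalidate_range_start = my_invalidate_range_start,
 *	};
 *
 * Because this invalidate_range_start() can fail with -EAGAIN it must not
 * also provide an invalidate_range_end() (see the comment above).
 */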

/*
 * The notifier chains are protected by mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_lock locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
        struct hlist_node hlist;
        const struct mmu_notifier_ops *ops;
        struct mm_struct *mm;
        struct rcu_head rcu;
        unsigned int users;
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
        bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
                           const struct mmu_notifier_range *range,
                           unsigned long cur_seq);
};

struct mmu_interval_notifier {
        struct interval_tree_node interval_tree;
        const struct mmu_interval_notifier_ops *ops;
        struct mm_struct *mm;
        struct hlist_node deferred_item;
        unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        unsigned flags;
        enum mmu_notifier_event event;
        void *owner;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
        return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
                                             struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
        struct mmu_notifier *ret;

        mmap_write_lock(mm);
        ret = mmu_notifier_get_locked(ops, mm);
        mmap_write_unlock(mm);
        return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);

extern int mmu_notifier_register(struct mmu_notifier *subscription,
                                 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
                                   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
                                    struct mm_struct *mm);
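
/*
 * Example (sketch): the get/put lifetime model. When the ops provide
 * alloc_notifier()/free_notifier(), mmu_notifier_get() either returns the
 * subscription already registered on this mm (taking a reference) or
 * allocates and registers a new one. mmu_notifier_put() drops the
 * reference; free_notifier() runs once no callback can still be in flight.
 * "my_mirror_ops" is the hypothetical ops table sketched above:
 *
 *	struct mmu_notifier *sub;
 *
 *	sub = mmu_notifier_get(&my_mirror_ops, current->mm);
 *	if (IS_ERR(sub))
 *		return PTR_ERR(sub);
 *	...
 *	mmu_notifier_put(sub);
 *
 * A module using this interface must call mmu_notifier_synchronize() before
 * unloading to wait for any outstanding free_notifier() callbacks.
 */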

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long length,
                                 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
        struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
        unsigned long start, unsigned long length,
        const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
                     unsigned long cur_seq)
{
        WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}

/**
 * mmu_interval_read_retry - End a read side critical section against a VA
 *                           range
 * @interval_sub: The subscription
 * @seq: The return value from the matching mmu_interval_read_begin()
 *
 * This MUST be called under the same user provided lock that is also used
 * from the invalidate() callback to call mmu_interval_set_seq().
 *
 * Returns true if an invalidation collided with this critical section, in
 * which case the caller must discard any state established under @seq and
 * retry from mmu_interval_read_begin().
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
                        unsigned long seq)
{
        return interval_sub->invalidate_seq != seq;
}

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return value from the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical section and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
                         unsigned long seq)
{
        /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
        return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
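
/*
 * Example (sketch): the collision-retry pattern the three helpers above
 * support. "ctx", "driver_lock" and make_device_ptes() are hypothetical;
 * the same driver_lock must be held both here and where the invalidate()
 * callback calls mmu_interval_set_seq():
 *
 *	again:
 *		seq = mmu_interval_read_begin(&ctx->notifier);
 *
 *		// Sleepable work: walk the page tables, fault pages in,
 *		// prepare the new device mappings, etc.
 *
 *		spin_lock(&ctx->driver_lock);
 *		if (mmu_interval_read_retry(&ctx->notifier, seq)) {
 *			spin_unlock(&ctx->driver_lock);
 *			goto again;
 *		}
 *		make_device_ptes(ctx);
 *		spin_unlock(&ctx->driver_lock);
 */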

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
                                      unsigned long start,
                                      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
                                     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
                                                bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
        return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_clear_flush_young(mm, start, end);
        return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_clear_young(mm, start, end);
        return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                          unsigned long address)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_test_young(mm, address);
        return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                                           unsigned long address, pte_t pte)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
        might_sleep();

        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        if (mm_has_notifiers(range->mm)) {
                range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
                __mmu_notifier_invalidate_range_start(range);
        }
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
        int ret = 0;

        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        if (mm_has_notifiers(range->mm)) {
                range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
                ret = __mmu_notifier_invalidate_range_start(range);
        }
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
        return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
        if (mmu_notifier_range_blockable(range))
                might_sleep();

        if (mm_has_notifiers(range->mm))
                __mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
        if (mm_has_notifiers(range->mm))
                __mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
        mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
                                           enum mmu_notifier_event event,
                                           unsigned flags,
                                           struct vm_area_struct *vma,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        range->vma = vma;
        range->event = event;
        range->mm = mm;
        range->start = start;
        range->end = end;
        range->flags = flags;
}

static inline void mmu_notifier_range_init_owner(
                        struct mmu_notifier_range *range,
                        enum mmu_notifier_event event, unsigned int flags,
                        struct vm_area_struct *vma, struct mm_struct *mm,
                        unsigned long start, unsigned long end, void *owner)
{
        mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
        range->owner = owner;
}
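
/*
 * Example (sketch): callers that change the primary MMU bracket the change
 * with the range API initialized above. "vma", "start" and "end" stand for
 * the caller's actual values:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
 *				vma->vm_mm, start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	// ... clear or rewrite the page table entries for [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 */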

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * This is safe to start by updating the secondary MMUs, because the primary
 * MMU pte invalidate must have already happened with a ptep_clear_flush()
 * before set_pte_at_notify() has been invoked. Updating the secondary MMUs
 * first is required when we change both the protection of the mapping from
 * read-only to read-write and the pfn (like during copy on write page
 * faults). Otherwise the old page would remain mapped readonly in the
 * secondary MMUs after the new page is already writable by some CPU through
 * the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
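
/*
 * Example (sketch): the copy-on-write sequence the comment above describes,
 * reduced to its two key steps. mm/memory.c's wp_page_copy() is the real
 * user; "new_pte" stands for the entry mapping the freshly copied page, and
 * the real code runs inside an invalidate_range_start()/only_end() pair:
 *
 *	ptep_clear_flush(vma, address, ptep);
 *	set_pte_at_notify(mm, address, ptep, new_pte);
 */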

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
        unsigned long start;
        unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
                                            unsigned long start,
                                            unsigned long end)
{
        range->start = start;
        range->end = end;
}

#define mmu_notifier_range_init(range, event, flags, vma, mm, start, end) \
        _mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
                                      end, owner) \
        _mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
        return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
        return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
{
        return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                          unsigned long address)
{
        return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                                           unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
        return 0;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */