#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Global SRCU used to synchronize against all notifier callbacks. */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif
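
/*
 * Per-mm state holding every subscription attached to an mm_struct: the
 * SRCU-protected hlist of struct mmu_notifier subscriptions and the
 * interval tree of struct mmu_interval_notifier subscriptions, plus the
 * sequence count and wait queue used to run interval invalidations.
 */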
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};
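
/*
 * The interval tree side uses a collision-retry scheme built around
 * subscriptions->invalidate_seq:
 *  - mn_itree_inv_start_range() makes the sequence odd when an invalidation
 *    overlaps the interval tree,
 *  - mn_itree_inv_end() makes it even again once all concurrent
 *    invalidations have finished, and wakes the wait queue,
 *  - inserts and removes that race with a running invalidation are queued
 *    on deferred_list and applied in mn_itree_inv_end(),
 *  - readers sample the sequence via mmu_interval_read_begin() and retry
 *    when it changes.
 */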
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even again, ending the invalidation */
	subscriptions->invalidate_seq++;

	/*
	 * Adds and removes that raced with the invalidation were queued on
	 * deferred_list; apply them now that the tree is stable again. This
	 * deferral avoids taking a blocking lock from within
	 * invalidate_range_start().
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}
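
/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * Returns the current collision-retry sequence number for the subscription.
 * The caller is expected to pass it to mmu_interval_read_retry() before
 * committing any work based on the covered page tables.  If an overlapping
 * invalidation is in progress this waits for it to complete, so a caller
 * that retries on a changed sequence never uses pages that are concurrently
 * being invalidated.
 */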
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * Sample the subscription's sequence under the lock and compare it
	 * against the global sequence: equality means the subscription is
	 * (or must be treated as) currently being invalidated.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * If an invalidation is running, wait for the matching
	 * invalidate_range_end() (which calls mn_itree_inv_end() and bumps
	 * the sequence).  The lock_map pair keeps lockdep aware that this
	 * may wait on invalidate_range_start() critical sections.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Either seq is now unequal to interval_sub->invalidate_seq, or the
	 * caller will detect the collision via mmu_interval_read_retry()
	 * and try again.
	 */
	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}
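
/*
 * Called when the mm is torn down: invoke every remaining subscription's
 * ->release() callback and empty the hlist.  The walk runs under SRCU and
 * ends with synchronize_srcu() so callers of mmu_notifier_unregister()
 * cannot free their subscription while it is still being dereferenced here.
 */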
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any
		 * more sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than wait
		 * for ->release to finish and then return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning
	 * to exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister, so the "exit_mmap" and "unregister" paths
	 * are serialized against each other.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}
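
/*
 * The helpers below fan page-aging queries out to every registered
 * subscription under SRCU.  clear_flush_young()/clear_young() OR all the
 * results together; test_young() stops at the first subscription that
 * reports the address as young.
 */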
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}
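
/*
 * Run the interval-tree subscriptions' ->invalidate() callbacks for an
 * invalidation range.  A callback may only refuse (return false) when the
 * range is non-blockable, in which case -EAGAIN is returned so the caller
 * can retry.
 */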
static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				/*
				 * We call all the notifiers on any EAGAIN,
				 * there is no way for a notifier to know if
				 * its start method failed, thus a start that
				 * does EAGAIN can't also do end.
				 */
				WARN_ON(ops->invalidate_range_end);
				ret = _ret;
			}
		}
	}

	if (ret) {
		/*
		 * Must be non-blocking to get here.  If there are multiple
		 * notifiers and one or more failed start, any of the ones
		 * that succeeded are expecting their end to be called.  Do
		 * so now.
		 */
		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
					 hlist, srcu_read_lock_held(&srcu)) {
			if (!subscription->ops->invalidate_range_end)
				continue;

			subscription->ops->invalidate_range_end(subscription,
								range);
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}
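
/*
 * Second half of an invalidation: call ->invalidate_range() (unless the
 * caller already did, signalled by only_end) and then
 * ->invalidate_range_end() for every hlist subscription.
 */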
static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call ->invalidate_range() here as well, so a subsystem that
		 * only implements invalidate_range() still gets its secondary
		 * TLBs flushed without having to register an
		 * invalidate_range_end() callback too.  Skip it when only_end
		 * is set, because the caller has already issued the
		 * invalidate_range() call for this range.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}
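
/*
 * Same as mmu_notifier_register() but here the caller must hold the
 * mmap_lock in write mode.
 */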
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Publish the new notifier_subscriptions while every possible reader
	 * is excluded by mm_take_all_locks().  The store-release pairs with
	 * the smp_load_acquire() in mmu_interval_notifier_insert() so that
	 * unlocked readers see a fully initialised structure; once created,
	 * the structure is only freed together with the mm itself.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop() in mmu_notifier_unregister() */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);
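
/**
 * mmu_notifier_register - Register a notifier on the specified mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Takes the mmap_lock in write mode and calls __mmu_notifier_register(), so
 * the caller must not already hold the mmap_lock.  The mm must have a
 * non-zero mm_users count for the duration of the call.
 */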
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}
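
/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * Either allocates a new mmu_notifier via ops->alloc_notifier() and registers
 * it, or returns the already registered notifier for these ops with an extra
 * reference.  The caller must hold the mmap_lock in write mode, and the
 * returned notifier must eventually be released with mmu_notifier_put().
 */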
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* Frees the per-mm subscriptions structure once the mm itself is torn down. */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1;
}
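
/*
 * Detach a subscription registered with mmu_notifier_register().  Must not be
 * called from within a notifier callback.  Any running ->release() from the
 * exit_mmap() path is waited for via SRCU, and the mm_count pin taken at
 * registration time is dropped before returning.
 */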
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, including ->release if it
	 * was run by __mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the mmgrab() in __mmu_notifier_register() */
	mmdrop(mm);
}
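
/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * Drops the reference taken by mmu_notifier_get_locked().  When the last
 * reference is dropped the subscription is removed from the mm's list and
 * ops->free_notifier() is called after an SRCU grace period, so the callback
 * may still be pending when this returns; modules should call
 * mmu_notifier_synchronize() before unloading.
 */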
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* Pairs with the mmdrop() in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change it.
	 * Retrying until invalidation is done is tricky due to the
	 * possibility for live lock, instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin().
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}
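
/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @mm: mm_struct to attach to
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm.  Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the subscription may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */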
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

static bool
mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
			  unsigned long seq)
{
	bool ret;

	spin_lock(&subscriptions->lock);
	ret = subscriptions->invalidate_seq != seq;
	spin_unlock(&subscriptions->lock);
	return ret;
}
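
/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert().  It
 * cannot be called from any ops callback.
 *
 * Once this returns, ops callbacks are no longer running on other CPUs and
 * will not be called in the future.
 */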
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   mmu_interval_seq_released(subscriptions, seq));

	/* Pairs with the mmgrab() in __mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
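
/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() has completed.  After it returns, any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using it the caller must ensure that all of its mmu_notifiers have
 * been fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */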
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;

	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);