// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 */

0008 #include <linux/mm.h>
0009 #include <linux/fs.h>
0010 #include <linux/rmap.h>
0011 #include <linux/interval_tree_generic.h>
0012
/* First file page offset (in pages) covered by the VMA. */
static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff;
}
0017
/*
 * Last file page offset covered by the VMA, inclusive: the interval is
 * [vm_pgoff, vm_pgoff + vma_pages(v) - 1], as required by the
 * closed-interval convention of interval_tree_generic.h.
 */
static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff + vma_pages(v) - 1;
}
0022
/*
 * Instantiate the vma_interval_tree_{insert,remove,iter_first,iter_next}
 * functions for struct vm_area_struct, linked through shared.rb, keyed by
 * file page offsets, and augmented via shared.rb_subtree_last.  The empty
 * attribute argument gives the generated functions external linkage.
 */
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
		     unsigned long, shared.rb_subtree_last,
		     vma_start_pgoff, vma_last_pgoff, , vma_interval_tree)
0026
0027
/*
 * Insert node immediately after prev in the interval tree.
 *
 * The caller must guarantee that node and prev have the same start pgoff
 * (checked by the VM_BUG_ON_VMA below), so placing node as prev's in-order
 * successor keeps the tree's key ordering intact.  While walking down to
 * the insertion point, node's last pgoff is folded into the
 * rb_subtree_last of every ancestor-to-be, so the augmented maxima are
 * already correct when rb_insert_augmented() rebalances.
 */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root)
{
	struct rb_node **link;
	struct vm_area_struct *parent;
	unsigned long last = vma_last_pgoff(node);

	VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

	if (!prev->shared.rb.rb_right) {
		/* prev has no right child: node becomes it. */
		parent = prev;
		link = &prev->shared.rb.rb_right;
	} else {
		/*
		 * Otherwise the successor slot is the leftmost position in
		 * prev's right subtree; update each node's subtree maximum
		 * on the way down.
		 */
		parent = rb_entry(prev->shared.rb.rb_right,
				  struct vm_area_struct, shared.rb);
		if (parent->shared.rb_subtree_last < last)
			parent->shared.rb_subtree_last = last;
		while (parent->shared.rb.rb_left) {
			parent = rb_entry(parent->shared.rb.rb_left,
					  struct vm_area_struct, shared.rb);
			if (parent->shared.rb_subtree_last < last)
				parent->shared.rb_subtree_last = last;
		}
		link = &parent->shared.rb.rb_left;
	}

	node->shared.rb_subtree_last = last;
	rb_link_node(&node->shared.rb, &parent->shared.rb, link);
	rb_insert_augmented(&node->shared.rb, &root->rb_root,
			    &vma_interval_tree_augment);
}
0060
/* Start pgoff of the VMA referenced by an anon_vma_chain. */
static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
	return vma_start_pgoff(avc->vma);
}
0065
/* Inclusive last pgoff of the VMA referenced by an anon_vma_chain. */
static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
	return vma_last_pgoff(avc->vma);
}
0070
/*
 * Instantiate a second interval tree over struct anon_vma_chain.  The
 * generated __anon_vma_interval_tree_* functions are static inline; the
 * public anon_vma_interval_tree_* wrappers below re-export them (and add
 * endpoint caching under CONFIG_DEBUG_VM_RB).
 */
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
		     avc_start_pgoff, avc_last_pgoff,
		     static inline, __anon_vma_interval_tree)
0074
/*
 * Insert an anon_vma_chain into the anon_vma interval tree.
 *
 * Under CONFIG_DEBUG_VM_RB, snapshot the interval endpoints on the node
 * first, so anon_vma_interval_tree_verify() can later detect a VMA whose
 * range was changed while still linked into the tree.
 */
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
	node->cached_vma_last = avc_last_pgoff(node);
	node->cached_vma_start = avc_start_pgoff(node);
#endif
	__anon_vma_interval_tree_insert(node, root);
}
0084
/* Remove an anon_vma_chain from the anon_vma interval tree. */
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
	__anon_vma_interval_tree_remove(node, root);
}
0090
/*
 * Return the first anon_vma_chain whose interval overlaps [first, last]
 * (both inclusive), or NULL if none does.
 */
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_first(root, first, last);
}
0097
/*
 * Return the next anon_vma_chain after node overlapping [first, last]
 * (both inclusive), or NULL when the iteration is exhausted.
 */
struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
				 unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_next(node, first, last);
}
0104
#ifdef CONFIG_DEBUG_VM_RB
/*
 * Debug check: warn (once per site) if the node's current interval no
 * longer matches the endpoints cached at insertion time, i.e. the VMA's
 * range was modified while the chain was still linked into the tree.
 */
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif