0001 /*
0002  * mm/interval_tree.c - interval tree for mapping->i_mmap
0003  *
0004  * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
0005  *
0006  * This file is released under the GPL v2.
0007  */
0008 
0009 #include <linux/mm.h>
0010 #include <linux/fs.h>
0011 #include <linux/rmap.h>
0012 #include <linux/interval_tree_generic.h>
0013 
/*
 * Start of the interval covered by a VMA: its offset into the backing
 * file, in pages (vm_pgoff is the file offset in PAGE_SIZE units).
 */
static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
    return v->vm_pgoff;
}
0018 
/*
 * Last page offset covered by a VMA: start pgoff plus the VMA's length
 * in pages, minus one.  Intervals are closed, i.e. [start, last].
 */
static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
    return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
}
0023 
/*
 * Instantiate vma_interval_tree_{insert,remove,iter_first,iter_next}()
 * for vm_area_struct nodes linked through shared.rb and augmented with
 * shared.rb_subtree_last.  The empty argument before the name prefix
 * leaves the generated functions with default (extern) linkage.
 */
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
             unsigned long, shared.rb_subtree_last,
             vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)
0027 
/*
 * Insert node immediately after prev in the interval tree.
 *
 * This is only valid when node and prev share the same start pgoff
 * (enforced by the VM_BUG_ON_VMA below); it places node at prev's
 * in-order successor position so iteration order is preserved.
 */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
                    struct vm_area_struct *prev,
                    struct rb_root *root)
{
    struct rb_node **link;
    struct vm_area_struct *parent;
    unsigned long last = vma_last_pgoff(node);

    VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

    if (!prev->shared.rb.rb_right) {
        /* prev has no right child: node becomes that right child. */
        parent = prev;
        link = &prev->shared.rb.rb_right;
    } else {
        /*
         * Otherwise the successor slot is the leftmost position in
         * prev's right subtree.  Push the new interval's 'last' into
         * the rb_subtree_last augment of every node on the way down,
         * since node will hang below all of them.
         */
        parent = rb_entry(prev->shared.rb.rb_right,
                  struct vm_area_struct, shared.rb);
        if (parent->shared.rb_subtree_last < last)
            parent->shared.rb_subtree_last = last;
        while (parent->shared.rb.rb_left) {
            parent = rb_entry(parent->shared.rb.rb_left,
                struct vm_area_struct, shared.rb);
            if (parent->shared.rb_subtree_last < last)
                parent->shared.rb_subtree_last = last;
        }
        link = &parent->shared.rb.rb_left;
    }

    node->shared.rb_subtree_last = last;
    rb_link_node(&node->shared.rb, &parent->shared.rb, link);
    /* Rebalance; the augment callbacks keep rb_subtree_last correct. */
    rb_insert_augmented(&node->shared.rb, root,
                &vma_interval_tree_augment);
}
0061 
/* Interval start of an anon_vma_chain: that of the VMA it links. */
static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
    return vma_start_pgoff(avc->vma);
}
0066 
/* Interval end of an anon_vma_chain: that of the VMA it links. */
static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
    return vma_last_pgoff(avc->vma);
}
0071 
/*
 * Instantiate __anon_vma_interval_tree_{insert,remove,iter_first,
 * iter_next}() as static inline helpers for anon_vma_chain nodes,
 * keyed on the endpoints of the VMA each chain links.
 */
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
             avc_start_pgoff, avc_last_pgoff,
             static inline, __anon_vma_interval_tree)
0075 
/*
 * Insert an anon_vma_chain into the interval tree.  With
 * CONFIG_DEBUG_VM_RB, first snapshot the interval endpoints so that
 * anon_vma_interval_tree_verify() can later detect a VMA whose range
 * changed while the chain was still linked into the tree.
 */
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                   struct rb_root *root)
{
#ifdef CONFIG_DEBUG_VM_RB
    node->cached_vma_start = avc_start_pgoff(node);
    node->cached_vma_last = avc_last_pgoff(node);
#endif
    __anon_vma_interval_tree_insert(node, root);
}
0085 
/* Remove an anon_vma_chain from the interval tree. */
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                   struct rb_root *root)
{
    __anon_vma_interval_tree_remove(node, root);
}
0091 
/*
 * Return the first anon_vma_chain whose interval overlaps the closed
 * range [first, last], or NULL if there is none.
 */
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root *root,
                  unsigned long first, unsigned long last)
{
    return __anon_vma_interval_tree_iter_first(root, first, last);
}
0098 
/*
 * Return the next anon_vma_chain after node that overlaps the closed
 * range [first, last], or NULL when the iteration is exhausted.
 */
struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
                 unsigned long first, unsigned long last)
{
    return __anon_vma_interval_tree_iter_next(node, first, last);
}
0105 
#ifdef CONFIG_DEBUG_VM_RB
/*
 * Debug check: warn (once per site) if the node's interval endpoints no
 * longer match the values cached at insertion time, i.e. the linked
 * VMA's range was changed while the chain sat in the interval tree.
 */
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
    WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
    WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif