#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>

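/* Static-initializer fragment for the mmap_lock field of an mm_struct. */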
#define MMAP_LOCK_INITIALIZER(name) \
    .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),

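/*
 * Tracepoints for mmap_lock activity: lock requested, acquire attempt
 * returned (with success/failure), and lock released.
 */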
DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);

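/*
 * With CONFIG_TRACING, the __mmap_lock_trace_*() wrappers emit the above
 * tracepoints, using tracepoint_enabled() to keep the disabled case cheap.
 * Without CONFIG_TRACING they are empty stubs.
 */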
#ifdef CONFIG_TRACING

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
                       bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                           bool write)
{
    if (tracepoint_enabled(mmap_lock_start_locking))
        __mmap_lock_do_trace_start_locking(mm, write);
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                              bool write, bool success)
{
    if (tracepoint_enabled(mmap_lock_acquire_returned))
        __mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
    if (tracepoint_enabled(mmap_lock_released))
        __mmap_lock_do_trace_released(mm, write);
}

#else /* !CONFIG_TRACING */

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                           bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                              bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}

#endif /* CONFIG_TRACING */

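/* Initialize the mmap_lock of a newly created mm_struct. */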
static inline void mmap_init_lock(struct mm_struct *mm)
{
    init_rwsem(&mm->mmap_lock);
}

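/*
 * Write-side API: exclusive locking for paths that modify the address space.
 * Each helper wraps the matching rw_semaphore primitive and reports the
 * attempt and its outcome through the tracepoints above.
 */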
static inline void mmap_write_lock(struct mm_struct *mm)
{
    __mmap_lock_trace_start_locking(mm, true);
    down_write(&mm->mmap_lock);
    __mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
    __mmap_lock_trace_start_locking(mm, true);
    down_write_nested(&mm->mmap_lock, subclass);
    __mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
    int ret;

    __mmap_lock_trace_start_locking(mm, true);
    ret = down_write_killable(&mm->mmap_lock);
    __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
    return ret;
}

static inline bool mmap_write_trylock(struct mm_struct *mm)
{
    bool ret;

    __mmap_lock_trace_start_locking(mm, true);
    ret = down_write_trylock(&mm->mmap_lock) != 0;
    __mmap_lock_trace_acquire_returned(mm, true, ret);
    return ret;
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
    __mmap_lock_trace_released(mm, true);
    up_write(&mm->mmap_lock);
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
    __mmap_lock_trace_acquire_returned(mm, false, true);
    downgrade_write(&mm->mmap_lock);
}

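/*
 * Read-side API: shared locking for paths that only walk the address space,
 * e.g. page fault handling. mmap_read_unlock_non_owner() is for the case
 * where the lock is released by a task other than the one that acquired it.
 */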
static inline void mmap_read_lock(struct mm_struct *mm)
{
    __mmap_lock_trace_start_locking(mm, false);
    down_read(&mm->mmap_lock);
    __mmap_lock_trace_acquire_returned(mm, false, true);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
    int ret;

    __mmap_lock_trace_start_locking(mm, false);
    ret = down_read_killable(&mm->mmap_lock);
    __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
    return ret;
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
    bool ret;

    __mmap_lock_trace_start_locking(mm, false);
    ret = down_read_trylock(&mm->mmap_lock) != 0;
    __mmap_lock_trace_acquire_returned(mm, false, ret);
    return ret;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
    __mmap_lock_trace_released(mm, false);
    up_read(&mm->mmap_lock);
}

static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
    __mmap_lock_trace_released(mm, false);
    up_read_non_owner(&mm->mmap_lock);
}

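/*
 * Debug assertions: with lockdep these verify the lock is held (held for
 * write where required); VM_BUG_ON_MM() additionally checks the rwsem state
 * on CONFIG_DEBUG_VM builds.
 */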
static inline void mmap_assert_locked(struct mm_struct *mm)
{
    lockdep_assert_held(&mm->mmap_lock);
    VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
    lockdep_assert_held_write(&mm->mmap_lock);
    VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
    return rwsem_is_contended(&mm->mmap_lock);
}

#endif /* _LINUX_MMAP_LOCK_H */
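
/*
 * Illustrative usage sketch, not part of the header above: a hypothetical
 * caller (example_walk_vmas) takes the lock shared around a VMA walk and
 * bails out if interrupted by a fatal signal. Kept under "#if 0" because it
 * is an example only.
 */
#if 0
static int example_walk_vmas(struct mm_struct *mm)
{
    if (mmap_read_lock_killable(mm))
        return -EINTR;    /* fatal signal while sleeping on the lock */
    /* ... inspect mm's VMAs under the shared mmap_lock ... */
    mmap_read_unlock(mm);
    return 0;
}
#endif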