// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include <trace/events/mmap_lock.h>

#include <linux/mm.h>
#include <linux/cgroup.h>
#include <linux/memcontrol.h>
#include <linux/mmap_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/trace_events.h>
#include <linux/local_lock.h>

EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);

#ifdef CONFIG_MEMCG

/*
 * The per-CPU memcg path buffers only exist while at least one of the
 * mmap_lock tracepoints is registered. reg_lock serializes their setup and
 * teardown, and reg_refcount counts the currently registered tracepoints.
 */
static DEFINE_MUTEX(reg_lock);
static int reg_refcount; /* Protected by reg_lock. */

/*
 * Size of one memcg path slot. Matching MAX_FILTER_STR_VAL means the whole
 * path can still be matched by a trace event filter.
 */
#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL

/*
 * How many contexts our trace events might be called in: normal, softirq,
 * irq, and NMI.
 */
#define CONTEXT_COUNT 4

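/*
 * Per-CPU state for the memcg path buffers: buf points (under RCU) to
 * CONTEXT_COUNT slots of MEMCG_PATH_BUF_SIZE bytes each, and buf_idx is the
 * offset of the next free slot, so nested contexts (softirq/irq/NMI) each
 * claim their own slot. The local lock serializes task-level users of the
 * buffer on this CPU.
 */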
struct memcg_path {
	local_lock_t lock;
	char __rcu *buf;
	local_t buf_idx;
};
static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
	.lock = INIT_LOCAL_LOCK(lock),
	.buf_idx = LOCAL_INIT(0),
};

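/*
 * Scratch array (one entry per possible CPU) used by free_memcg_path_bufs()
 * to stash the old buffer pointers until in-flight RCU readers are done with
 * them.
 */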
static char **tmp_bufs;

/* Called with reg_lock held. */
static void free_memcg_path_bufs(void)
{
	struct memcg_path *memcg_path;
	int cpu;
	char **old = tmp_bufs;

	for_each_possible_cpu(cpu) {
		memcg_path = per_cpu_ptr(&memcg_paths, cpu);
		*(old++) = rcu_dereference_protected(memcg_path->buf,
				lockdep_is_held(&reg_lock));
		rcu_assign_pointer(memcg_path->buf, NULL);
	}

	/* Wait for inflight memcg_path_buf users to finish. */
	synchronize_rcu();

	old = tmp_bufs;
	for_each_possible_cpu(cpu) {
		kfree(*(old++));
	}

	kfree(tmp_bufs);
	tmp_bufs = NULL;
}

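/*
 * Registration hook: allocate the path buffers when the refcount goes 0->1,
 * i.e. when the first mmap_lock tracepoint starts being used.
 */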
int trace_mmap_lock_reg(void)
{
	int cpu;
	char *new;

	mutex_lock(&reg_lock);

	/* If the refcount is going 0->1, proceed with allocating buffers. */
	if (reg_refcount++)
		goto out;

	tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
				 GFP_KERNEL);
	if (tmp_bufs == NULL)
		goto out_fail;

	for_each_possible_cpu(cpu) {
		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
		if (new == NULL)
			goto out_fail_free;
		rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
		/* No need to synchronize; readers that saw NULL just skip the path. */
	}

out:
	mutex_unlock(&reg_lock);
	return 0;

out_fail_free:
	free_memcg_path_bufs();
out_fail:
	/* Since we failed, undo the earlier ref increment. */
	--reg_refcount;

	mutex_unlock(&reg_lock);
	return -ENOMEM;
}

void trace_mmap_lock_unreg(void)
{
	mutex_lock(&reg_lock);

	/* If the refcount is going 1->0, proceed with freeing buffers. */
	if (--reg_refcount)
		goto out;

	free_memcg_path_bufs();

out:
	mutex_unlock(&reg_lock);
}

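/*
 * Claim the next MEMCG_PATH_BUF_SIZE-byte slot in this CPU's buffer, entering
 * an RCU read-side critical section so the buffer cannot be freed underneath
 * us. Returns NULL if the buffers are not currently allocated. Every
 * successful call must be paired with put_memcg_path_buf(), which releases
 * the slot and leaves the RCU critical section.
 */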
static inline char *get_memcg_path_buf(void)
{
	struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
	char *buf;
	int idx;

	rcu_read_lock();
	buf = rcu_dereference(memcg_path->buf);
	if (buf == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
	      MEMCG_PATH_BUF_SIZE;
	return &buf[idx];
}

static inline void put_memcg_path_buf(void)
{
	local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
	rcu_read_unlock();
}

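/*
 * Emit the given mmap_lock trace event with the mm's memcg path as an
 * argument. The per-CPU local lock is held across the lookup so the path slot
 * stays valid until the event has been written; if no path can be produced,
 * an empty string is traced instead.
 */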
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
	do {                                                                   \
		const char *memcg_path;                                        \
		local_lock(&memcg_paths.lock);                                 \
		memcg_path = get_mm_memcg_path(mm);                            \
		trace_mmap_lock_##type(mm,                                     \
				       memcg_path != NULL ? memcg_path : "",   \
				       ##__VA_ARGS__);                         \
		if (likely(memcg_path != NULL))                                \
			put_memcg_path_buf();                                  \
		local_unlock(&memcg_paths.lock);                               \
	} while (0)

#else /* !CONFIG_MEMCG */

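/*
 * With CONFIG_MEMCG=n there are no path buffers to manage, so registration is
 * a no-op and events are traced with an empty memcg path.
 */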
int trace_mmap_lock_reg(void)
{
	return 0;
}

void trace_mmap_lock_unreg(void)
{
}

#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
	trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)

#endif /* CONFIG_MEMCG */

#ifdef CONFIG_TRACING
#ifdef CONFIG_MEMCG

/*
 * Write the given mm_struct's memcg path to the current CPU's buffer and
 * return a pointer to it. If the path cannot be determined, or no buffer is
 * currently available, NULL is returned instead.
 *
 * The caller must hold the per-CPU memcg_paths lock (TRACE_MMAP_LOCK_EVENT
 * takes care of this) and must call put_memcg_path_buf() once it is done with
 * the returned pointer.
 */
static const char *get_mm_memcg_path(struct mm_struct *mm)
{
	char *buf = NULL;
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	if (memcg == NULL)
		goto out;
	if (unlikely(memcg->css.cgroup == NULL))
		goto out_put;

	buf = get_memcg_path_buf();
	if (buf == NULL)
		goto out_put;

	cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);

out_put:
	css_put(&memcg->css);
out:
	return buf;
}


#endif /* CONFIG_MEMCG */

/*
 * Trace calls must be in a separate file, as otherwise there's a circular
 * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
 */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(start_locking, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);

void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success)
{
	TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);

void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(released, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
#endif /* CONFIG_TRACING */