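/*
 * perf support for trace events: override the tracepoint helper macros so
 * that each event class expands to a perf_trace_##call() probe which fills
 * a perf buffer and submits the record to perf (and any attached BPF
 * programs).
 */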
#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

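/*
 * Dynamic array fields are stored as a 32-bit descriptor in the entry:
 * the low 16 bits hold the offset of the data from the start of the
 * entry, the high 16 bits hold its length.
 */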
#undef __get_dynamic_array
#define __get_dynamic_array(field) \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field) \
                ((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __get_sockaddr
#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))

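/*
 * __rel_loc fields use the same <length, offset> packing, but the offset
 * is relative to the end of the __rel_loc descriptor itself rather than
 * to the start of the entry.
 */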
#undef __get_rel_dynamic_array
#define __get_rel_dynamic_array(field) \
                ((void *)__entry + \
                 offsetof(typeof(*__entry), __rel_loc_##field) + \
                 sizeof(__entry->__rel_loc_##field) + \
                 (__entry->__rel_loc_##field & 0xffff))

#undef __get_rel_dynamic_array_len
#define __get_rel_dynamic_array_len(field) \
                ((__entry->__rel_loc_##field >> 16) & 0xffff)

#undef __get_rel_str
#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))

#undef __get_rel_bitmask
#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)

#undef __get_rel_sockaddr
#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))

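/*
 * __perf_count() and __perf_task() let an event override the sample count
 * and target task handed to perf; the probe below defaults them to 1 and
 * NULL.
 */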
#undef __perf_count
#define __perf_count(c) (__count = (c))

#undef __perf_task
#define __perf_task(t) (__task = (t))

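/*
 * For each event class, emit a perf_trace_##call() probe: size the record,
 * return early when no perf event or BPF program is attached, grab a
 * per-CPU perf buffer, run the field assignments and submit the record.
 */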
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
perf_trace_##call(void *__data, proto) \
{ \
        struct trace_event_call *event_call = __data; \
        struct trace_event_data_offsets_##call __maybe_unused __data_offsets; \
        struct trace_event_raw_##call *entry; \
        struct pt_regs *__regs; \
        u64 __count = 1; \
        struct task_struct *__task = NULL; \
        struct hlist_head *head; \
        int __entry_size; \
        int __data_size; \
        int rctx; \
\
        __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
\
        head = this_cpu_ptr(event_call->perf_events); \
        if (!bpf_prog_array_valid(event_call) && \
            __builtin_constant_p(!__task) && !__task && \
            hlist_empty(head)) \
                return; \
\
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
                             sizeof(u64)); \
        __entry_size -= sizeof(u32); \
\
        entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); \
        if (!entry) \
                return; \
\
        perf_fetch_caller_regs(__regs); \
\
        tstruct \
\
        { assign; } \
\
        perf_trace_run_bpf_submit(entry, __entry_size, rctx, \
                                  event_call, __count, __regs, \
                                  head, __task); \
}
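/*
 * The DEFINE_EVENT() expansion below is never called; it only exists as a
 * build-time check that the perf probe's prototype still matches the
 * tracepoint callback type.
 */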
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void perf_test_probe_##call(void) \
{ \
        check_trace_callback_type_##call(perf_trace_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */