Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _KERNEL_EVENTS_INTERNAL_H
0003 #define _KERNEL_EVENTS_INTERNAL_H
0004 
0005 #include <linux/hardirq.h>
0006 #include <linux/uaccess.h>
0007 #include <linux/refcount.h>
0008 
0009 /* Buffer handling */
0010 
0011 #define RING_BUFFER_WRITABLE        0x01
0012 
/*
 * Backing store for one perf ring buffer, mapped into user space.
 * The data area (data_pages[]) follows the control page (user_page);
 * an optional AUX area can additionally be allocated for it.
 * Freed via RCU (rb_free_rcu) once refcount drops to zero.
 */
struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;	/* deferred free, see rb_free_rcu() */
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;		/* deferred free of the vmalloc area */
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;	/* mmaps of the data area */
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;	/* nested AUX writers */
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);	/* pmu-specific AUX teardown */
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;	/* control page, first mmap'ed page */
	void				*data_pages[];	/* flexible array of data pages */
};
0060 
/* Release @rb and all its backing pages. */
extern void rb_free(struct perf_buffer *rb);
0062 
0063 static inline void rb_free_rcu(struct rcu_head *rcu_head)
0064 {
0065     struct perf_buffer *rb;
0066 
0067     rb = container_of(rcu_head, struct perf_buffer, rcu_head);
0068     rb_free(rb);
0069 }
0070 
0071 static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
0072 {
0073     if (!pause && rb->nr_pages)
0074         rb->paused = 0;
0075     else
0076         rb->paused = 1;
0077 }
0078 
/* Allocate a ring buffer with @nr_pages data pages. */
extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
/* Wake up waiters polling on @event's buffer. */
extern void perf_event_wakeup(struct perf_event *event);
/* Allocate the AUX area of @rb, starting at file offset @pgoff. */
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
/* Acquire/release a reference on the buffer attached to @event. */
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);
0087 
0088 static inline bool rb_has_aux(struct perf_buffer *rb)
0089 {
0090     return !!rb->aux_nr_pages;
0091 }
0092 
/* Emit a PERF_RECORD_AUX record describing newly written AUX data. */
void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

/* Translate a buffer-relative page offset into the backing page. */
extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);
0098 
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

/* Allocation order of each data chunk: 2^page_order real pages. */
static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

/* Page-backed buffers always use single (order-0) pages. */
static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif
0118 
0119 static inline int data_page_nr(struct perf_buffer *rb)
0120 {
0121     return rb->nr_pages << page_order(rb);
0122 }
0123 
0124 static inline unsigned long perf_data_size(struct perf_buffer *rb)
0125 {
0126     return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
0127 }
0128 
0129 static inline unsigned long perf_aux_size(struct perf_buffer *rb)
0130 {
0131     return rb->aux_nr_pages << PAGE_SHIFT;
0132 }
0133 
/*
 * Shared function body for the __output_*() helpers below.
 *
 * Copies @len bytes through @handle into the ring buffer, stepping to
 * the next data page each time the current one fills.  @memcpy_func
 * returns the number of bytes it could NOT copy (0 on full success),
 * so the loop terminates early on a partial copy.  @advance_buf says
 * whether the source pointer @buf should be advanced alongside the
 * destination.  Evaluates to the number of bytes left uncopied.
 *
 * Note: the handle->page wrap-around mask assumes nr_pages is a power
 * of two (which rb_alloc() guarantees).
 */
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
0160 
/*
 * Generate an output-copy helper named @func_name that moves data with
 * @memcpy_func, advancing both the destination and the source buffer.
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
0166 
/*
 * Copy into the ring buffer via a caller-supplied @copy_func, which is
 * handed (dst, src, offset-so-far, chunk size).  The source pointer is
 * not advanced (advance_buf == false); instead the running offset
 * (orig_len - len) tells the copier how far it has progressed.
 * Returns the number of bytes left uncopied.
 */
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}
0175 
/*
 * Kernel-to-kernel copy helper for __output_copy().  A plain memcpy()
 * cannot fail, so report zero bytes left uncopied.
 */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);

	return 0;
}
0182 
/* __output_copy(): copy kernel memory into the ring buffer. */
DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
0184 
/*
 * No-op "copy" for __output_skip(): advances the output position over
 * @n bytes without writing anything.  Nothing can fail, so report
 * zero bytes left uncopied.
 */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	(void)dst;
	(void)src;
	(void)n;

	return 0;
}
0190 
/* __output_skip(): advance over @len bytes without writing data. */
DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
0192 
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

/*
 * Default user-memory copy for __output_copy_user().  Page faults are
 * disabled around the copy so an unmapped source address fails fast
 * instead of sleeping in the fault handler (output can happen from
 * NMI/IRQ context).  Returns the number of bytes NOT copied, matching
 * copy_from_user() semantics.
 */
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif
0208 
/* __output_copy_user(): copy from user space with faults disabled. */
DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
0210 
/*
 * Claim the per-context recursion slot for the current interrupt
 * context level.  Returns the context index on success, or -1 if this
 * context is already inside perf output (recursion detected).  The
 * barrier() keeps the counter increment ordered before the protected
 * accesses that follow; paired with put_recursion_context().
 */
static inline int get_recursion_context(int *recursion)
{
	unsigned char ctx = interrupt_context_level();

	if (!recursion[ctx]) {
		recursion[ctx]++;
		barrier();

		return ctx;
	}

	return -1;
}
0223 
/*
 * Release the recursion slot taken by get_recursion_context().  The
 * barrier() orders the protected accesses before the counter drop,
 * pairing with the barrier on the acquire side.
 */
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
0229 
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
/* Architecture can sample the user stack (PERF_SAMPLE_STACK_USER). */
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
/* No user stack dump support: report a zero stack pointer. */
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
0245 
0246 #endif /* _KERNEL_EVENTS_INTERNAL_H */