/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

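/*
 * Shared template for the slab allocation events defined below (kmalloc and
 * kmem_cache_alloc): records the call site, the returned pointer, the
 * requested and actually allocated sizes, the GFP flags, and whether the
 * allocation is accounted to a memory cgroup (CONFIG_MEMCG_KMEM with either
 * __GFP_ACCOUNT on the call or SLAB_ACCOUNT on the cache).
 */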
DECLARE_EVENT_CLASS(kmem_alloc,

    TP_PROTO(unsigned long call_site,
         const void *ptr,
         struct kmem_cache *s,
         size_t bytes_req,
         size_t bytes_alloc,
         gfp_t gfp_flags),

    TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),

    TP_STRUCT__entry(
        __field(    unsigned long,  call_site   )
        __field(    const void *,   ptr     )
        __field(    size_t,     bytes_req   )
        __field(    size_t,     bytes_alloc )
        __field(    unsigned long,  gfp_flags   )
        __field(    bool,       accounted   )
    ),

    TP_fast_assign(
        __entry->call_site  = call_site;
        __entry->ptr        = ptr;
        __entry->bytes_req  = bytes_req;
        __entry->bytes_alloc    = bytes_alloc;
        __entry->gfp_flags  = (__force unsigned long)gfp_flags;
        __entry->accounted  = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
                      ((gfp_flags & __GFP_ACCOUNT) ||
                      (s && s->flags & SLAB_ACCOUNT)) : false;
    ),

    TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
        (void *)__entry->call_site,
        __entry->ptr,
        __entry->bytes_req,
        __entry->bytes_alloc,
        show_gfp_flags(__entry->gfp_flags),
        __entry->accounted ? "true" : "false")
);

DEFINE_EVENT(kmem_alloc, kmalloc,

    TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
         size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

    TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

    TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
         size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

    TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);

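/*
 * Same as kmem_alloc, with an additional NUMA node argument; shared by
 * kmalloc_node and kmem_cache_alloc_node.
 */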
DECLARE_EVENT_CLASS(kmem_alloc_node,

    TP_PROTO(unsigned long call_site,
         const void *ptr,
         struct kmem_cache *s,
         size_t bytes_req,
         size_t bytes_alloc,
         gfp_t gfp_flags,
         int node),

    TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),

    TP_STRUCT__entry(
        __field(    unsigned long,  call_site   )
        __field(    const void *,   ptr     )
        __field(    size_t,     bytes_req   )
        __field(    size_t,     bytes_alloc )
        __field(    unsigned long,  gfp_flags   )
        __field(    int,        node        )
        __field(    bool,       accounted   )
    ),

    TP_fast_assign(
        __entry->call_site  = call_site;
        __entry->ptr        = ptr;
        __entry->bytes_req  = bytes_req;
        __entry->bytes_alloc    = bytes_alloc;
        __entry->gfp_flags  = (__force unsigned long)gfp_flags;
        __entry->node       = node;
        __entry->accounted  = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
                      ((gfp_flags & __GFP_ACCOUNT) ||
                      (s && s->flags & SLAB_ACCOUNT)) : false;
    ),

    TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
        (void *)__entry->call_site,
        __entry->ptr,
        __entry->bytes_req,
        __entry->bytes_alloc,
        show_gfp_flags(__entry->gfp_flags),
        __entry->node,
        __entry->accounted ? "true" : "false")
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

    TP_PROTO(unsigned long call_site, const void *ptr,
         struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
         gfp_t gfp_flags, int node),

    TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

    TP_PROTO(unsigned long call_site, const void *ptr,
         struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
         gfp_t gfp_flags, int node),

    TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);

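/*
 * Free-side slab events: kfree records only the call site and the pointer
 * being freed; kmem_cache_free additionally records the cache name.
 */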
TRACE_EVENT(kfree,

    TP_PROTO(unsigned long call_site, const void *ptr),

    TP_ARGS(call_site, ptr),

    TP_STRUCT__entry(
        __field(    unsigned long,  call_site   )
        __field(    const void *,   ptr     )
    ),

    TP_fast_assign(
        __entry->call_site  = call_site;
        __entry->ptr        = ptr;
    ),

    TP_printk("call_site=%pS ptr=%p",
          (void *)__entry->call_site, __entry->ptr)
);

TRACE_EVENT(kmem_cache_free,

    TP_PROTO(unsigned long call_site, const void *ptr, const char *name),

    TP_ARGS(call_site, ptr, name),

    TP_STRUCT__entry(
        __field(    unsigned long,  call_site   )
        __field(    const void *,   ptr     )
        __string(   name,   name    )
    ),

    TP_fast_assign(
        __entry->call_site  = call_site;
        __entry->ptr        = ptr;
        __assign_str(name, name);
    ),

    TP_printk("call_site=%pS ptr=%p name=%s",
          (void *)__entry->call_site, __entry->ptr, __get_str(name))
);

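/*
 * Page allocator events store the pfn rather than the struct page pointer;
 * the page is reconstructed with pfn_to_page() only when the event is
 * printed.
 */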
TRACE_EVENT(mm_page_free,

    TP_PROTO(struct page *page, unsigned int order),

    TP_ARGS(page, order),

    TP_STRUCT__entry(
        __field(    unsigned long,  pfn     )
        __field(    unsigned int,   order       )
    ),

    TP_fast_assign(
        __entry->pfn        = page_to_pfn(page);
        __entry->order      = order;
    ),

    TP_printk("page=%p pfn=0x%lx order=%d",
            pfn_to_page(__entry->pfn),
            __entry->pfn,
            __entry->order)
);

TRACE_EVENT(mm_page_free_batched,

    TP_PROTO(struct page *page),

    TP_ARGS(page),

    TP_STRUCT__entry(
        __field(    unsigned long,  pfn     )
    ),

    TP_fast_assign(
        __entry->pfn        = page_to_pfn(page);
    ),

    TP_printk("page=%p pfn=0x%lx order=0",
            pfn_to_page(__entry->pfn),
            __entry->pfn)
);

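/*
 * A failed allocation passes page == NULL; this is recorded as pfn == -1UL
 * and printed as a NULL page with pfn 0.
 */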
TRACE_EVENT(mm_page_alloc,

    TP_PROTO(struct page *page, unsigned int order,
            gfp_t gfp_flags, int migratetype),

    TP_ARGS(page, order, gfp_flags, migratetype),

    TP_STRUCT__entry(
        __field(    unsigned long,  pfn     )
        __field(    unsigned int,   order       )
        __field(    unsigned long,  gfp_flags   )
        __field(    int,        migratetype )
    ),

    TP_fast_assign(
        __entry->pfn        = page ? page_to_pfn(page) : -1UL;
        __entry->order      = order;
        __entry->gfp_flags  = (__force unsigned long)gfp_flags;
        __entry->migratetype    = migratetype;
    ),

    TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
        __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
        __entry->pfn != -1UL ? __entry->pfn : 0,
        __entry->order,
        __entry->migratetype,
        show_gfp_flags(__entry->gfp_flags))
);

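/*
 * Shared template for page events that also record whether the page was
 * taken as part of a per-cpu free list refill.
 */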
DECLARE_EVENT_CLASS(mm_page,

    TP_PROTO(struct page *page, unsigned int order, int migratetype,
         int percpu_refill),

    TP_ARGS(page, order, migratetype, percpu_refill),

    TP_STRUCT__entry(
        __field(    unsigned long,  pfn     )
        __field(    unsigned int,   order       )
        __field(    int,        migratetype )
        __field(    int,        percpu_refill   )
    ),

    TP_fast_assign(
        __entry->pfn        = page ? page_to_pfn(page) : -1UL;
        __entry->order      = order;
        __entry->migratetype    = migratetype;
        __entry->percpu_refill  = percpu_refill;
    ),

    TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
        __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
        __entry->pfn != -1UL ? __entry->pfn : 0,
        __entry->order,
        __entry->migratetype,
        __entry->percpu_refill)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

    TP_PROTO(struct page *page, unsigned int order, int migratetype,
         int percpu_refill),

    TP_ARGS(page, order, migratetype, percpu_refill)
);

TRACE_EVENT(mm_page_pcpu_drain,

    TP_PROTO(struct page *page, unsigned int order, int migratetype),

    TP_ARGS(page, order, migratetype),

    TP_STRUCT__entry(
        __field(    unsigned long,  pfn     )
        __field(    unsigned int,   order       )
        __field(    int,        migratetype )
    ),

    TP_fast_assign(
        __entry->pfn        = page ? page_to_pfn(page) : -1UL;
        __entry->order      = order;
        __entry->migratetype    = migratetype;
    ),

    TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
        pfn_to_page(__entry->pfn), __entry->pfn,
        __entry->order, __entry->migratetype)
);

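/*
 * Records a page allocation that fell back to a pageblock of a different
 * migratetype.  "fragmenting" is derived at print time from
 * fallback_order < pageblock_order; "change_ownership" records whether the
 * pageblock's migratetype already matches the allocating migratetype.
 */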
TRACE_EVENT(mm_page_alloc_extfrag,

    TP_PROTO(struct page *page,
        int alloc_order, int fallback_order,
        int alloc_migratetype, int fallback_migratetype),

    TP_ARGS(page,
        alloc_order, fallback_order,
        alloc_migratetype, fallback_migratetype),

    TP_STRUCT__entry(
        __field(    unsigned long,  pfn         )
        __field(    int,        alloc_order     )
        __field(    int,        fallback_order      )
        __field(    int,        alloc_migratetype   )
        __field(    int,        fallback_migratetype    )
        __field(    int,        change_ownership    )
    ),

    TP_fast_assign(
        __entry->pfn            = page_to_pfn(page);
        __entry->alloc_order        = alloc_order;
        __entry->fallback_order     = fallback_order;
        __entry->alloc_migratetype  = alloc_migratetype;
        __entry->fallback_migratetype   = fallback_migratetype;
        __entry->change_ownership   = (alloc_migratetype ==
                    get_pageblock_migratetype(page));
    ),

    TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
        pfn_to_page(__entry->pfn),
        __entry->pfn,
        __entry->alloc_order,
        __entry->fallback_order,
        pageblock_order,
        __entry->alloc_migratetype,
        __entry->fallback_migratetype,
        __entry->fallback_order < pageblock_order,
        __entry->change_ownership)
);

/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
    int ret;
    unsigned long hashval;

    ret = ptr_to_hashval(ptr, &hashval);
    if (ret)
        return 0;

    /* The hashed value is only 32-bit */
    return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif

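/*
 * EM()/EMe() are expanded twice: first to emit TRACE_DEFINE_ENUM() so the
 * MM_* enum values are visible to userspace tools, then to build the
 * { value, "name" } pairs consumed by __print_symbolic() in rss_stat.
 */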
#define TRACE_MM_PAGES      \
    EM(MM_FILEPAGES)    \
    EM(MM_ANONPAGES)    \
    EM(MM_SWAPENTS)     \
    EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)   TRACE_DEFINE_ENUM(a);
#define EMe(a)  TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)   { a, #a },
#define EMe(a)  { a, #a }

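/*
 * Reports updates to a process' RSS counters.  The mm is identified by a
 * hash of its pointer (see mm_ptr_to_hash() above) rather than the raw
 * kernel address, and the counter value is converted from pages to bytes.
 */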
TRACE_EVENT(rss_stat,

    TP_PROTO(struct mm_struct *mm,
        int member,
        long count),

    TP_ARGS(mm, member, count),

    TP_STRUCT__entry(
        __field(unsigned int, mm_id)
        __field(unsigned int, curr)
        __field(int, member)
        __field(long, size)
    ),

    TP_fast_assign(
        __entry->mm_id = mm_ptr_to_hash(mm);
        __entry->curr = !!(current->mm == mm);
        __entry->member = member;
        __entry->size = (count << PAGE_SHIFT);
    ),

    TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
        __entry->mm_id,
        __entry->curr,
        __print_symbolic(__entry->member, TRACE_MM_PAGES),
        __entry->size)
    );
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
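
/*
 * Illustrative sketch, not part of the original header: attaching a probe
 * to the kfree tracepoint from kernel code.  The probe name below is
 * hypothetical; register_trace_kfree()/unregister_trace_kfree() are the
 * helpers generated for this tracepoint, and a probe receives the private
 * data pointer followed by the TP_PROTO arguments.
 *
 *	static void kfree_probe(void *data, unsigned long call_site,
 *				const void *ptr)
 *	{
 *		pr_info("kfree from %pS ptr=%p\n", (void *)call_site, ptr);
 *	}
 *
 *	register_trace_kfree(kfree_probe, NULL);	at module init
 *	unregister_trace_kfree(kfree_probe, NULL);	at module exit, followed by
 *	tracepoint_synchronize_unregister();		before the probe is freed
 *
 * From userspace, the same events can simply be enabled through tracefs,
 * e.g. /sys/kernel/tracing/events/kmem/kfree/enable.
 */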