0001
0002 #undef TRACE_SYSTEM
0003 #define TRACE_SYSTEM xen
0004
0005 #if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ)
0006 #define _TRACE_XEN_H
0007
0008 #include <linux/tracepoint.h>
0009 #include <asm/paravirt_types.h>
0010 #include <asm/xen/trace_types.h>
0011
0012 struct multicall_entry;
0013
0014
/*
 * Multicall batch lifecycle: one event class shared by xen_mc_batch
 * (batch opened) and xen_mc_issue (batch issued), recording the
 * paravirt lazy mode passed in by the caller.
 */
DECLARE_EVENT_CLASS(xen_mc__batch,
	    TP_PROTO(enum paravirt_lazy_mode mode),
	    TP_ARGS(mode),
	    TP_STRUCT__entry(
		    __field(enum paravirt_lazy_mode, mode)
		    ),
	    TP_fast_assign(__entry->mode = mode),
	    /* Decodes only MMU/CPU explicitly; anything else prints as NONE. */
	    TP_printk("start batch LAZY_%s",
		      (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
		      (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
	);
#define DEFINE_XEN_MC_BATCH(name)			\
	DEFINE_EVENT(xen_mc__batch, name,		\
		TP_PROTO(enum paravirt_lazy_mode mode),	\
		     TP_ARGS(mode))

DEFINE_XEN_MC_BATCH(xen_mc_batch);
DEFINE_XEN_MC_BATCH(xen_mc_issue);
0033
0034 TRACE_DEFINE_SIZEOF(ulong);
0035
/*
 * One multicall entry queued: records the hypercall op, the number of
 * arguments actually supplied, and a fixed 6-slot argument array with
 * unused trailing slots zeroed (so the printk below is deterministic).
 */
TRACE_EVENT(xen_mc_entry,
	    TP_PROTO(struct multicall_entry *mc, unsigned nargs),
	    TP_ARGS(mc, nargs),
	    TP_STRUCT__entry(
		    __field(unsigned int, op)
		    __field(unsigned int, nargs)
		    __array(unsigned long, args, 6)
		    ),
	    TP_fast_assign(__entry->op = mc->op;
			   __entry->nargs = nargs;
			   /* Copy only the live args, zero the remainder of the 6 slots. */
			   memcpy(__entry->args, mc->args, sizeof(ulong) * nargs);
			   memset(__entry->args + nargs, 0, sizeof(ulong) * (6 - nargs));
		    ),
	    /* %u%s: op number immediately followed by its symbolic name string. */
	    TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]",
		      __entry->op, xen_hypercall_name(__entry->op),
		      __entry->args[0], __entry->args[1], __entry->args[2],
		      __entry->args[3], __entry->args[4], __entry->args[5])
	);
0054
/*
 * Records the number of argument bytes requested when allocating a
 * multicall entry.
 */
TRACE_EVENT(xen_mc_entry_alloc,
	    TP_PROTO(size_t args),
	    TP_ARGS(args),
	    TP_STRUCT__entry(
		    __field(size_t, args)
		    ),
	    TP_fast_assign(__entry->args = args),
	    TP_printk("alloc entry %zu arg bytes", __entry->args)
	);
0064
/*
 * Records a completion callback registered against the current
 * multicall batch: the function pointer and its opaque data argument.
 */
TRACE_EVENT(xen_mc_callback,
	    TP_PROTO(xen_mc_callback_fn_t fn, void *data),
	    TP_ARGS(fn, data),
	    TP_STRUCT__entry(
		    /*
		     * NOTE(review): __field_struct (rather than __field) is
		     * presumably used so the tracing macros do not apply
		     * is_signed_type() to a function-pointer type — confirm
		     * against the trace macro implementation.
		     */
		    __field_struct(xen_mc_callback_fn_t, fn)
		    __field(void *, data)
		    ),
	    TP_fast_assign(
		    __entry->fn = fn;
		    __entry->data = data;
		    ),
	    /* %ps resolves the callback pointer to a symbol name. */
	    TP_printk("callback %ps, data %p",
		      __entry->fn, __entry->data)
	);
0083
/*
 * Records why a multicall batch flush was triggered (NONE, BATCH,
 * ARGS, or CALLBACK; anything else prints as "??").
 */
TRACE_EVENT(xen_mc_flush_reason,
	    TP_PROTO(enum xen_mc_flush_reason reason),
	    TP_ARGS(reason),
	    TP_STRUCT__entry(
		    __field(enum xen_mc_flush_reason, reason)
		    ),
	    TP_fast_assign(__entry->reason = reason),
	    TP_printk("flush reason %s",
		      (__entry->reason == XEN_MC_FL_NONE) ? "NONE" :
		      (__entry->reason == XEN_MC_FL_BATCH) ? "BATCH" :
		      (__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" :
		      (__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??")
	);
0097
/*
 * Records the size of a multicall flush: number of queued hypercalls,
 * argument bytes, and pending callbacks (per the printk text below).
 */
TRACE_EVENT(xen_mc_flush,
	    TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx),
	    TP_ARGS(mcidx, argidx, cbidx),
	    TP_STRUCT__entry(
		    __field(unsigned, mcidx)
		    __field(unsigned, argidx)
		    __field(unsigned, cbidx)
		    ),
	    TP_fast_assign(__entry->mcidx = mcidx;
			   __entry->argidx = argidx;
			   __entry->cbidx = cbidx),
	    TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks",
		      __entry->mcidx, __entry->argidx, __entry->cbidx)
	);
0112
/*
 * Records an attempt to extend the argument space of the current
 * multicall entry: the hypercall op, the extra byte count, and the
 * outcome (OK, BAD_OP, or NO_SPACE).
 */
TRACE_EVENT(xen_mc_extend_args,
	    TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res),
	    TP_ARGS(op, args, res),
	    TP_STRUCT__entry(
		    /* op is narrowed from unsigned long to unsigned int here. */
		    __field(unsigned int, op)
		    __field(size_t, args)
		    __field(enum xen_mc_extend_args, res)
		    ),
	    TP_fast_assign(__entry->op = op;
			   __entry->args = args;
			   __entry->res = res),
	    TP_printk("extending op %u%s by %zu bytes res %s",
		      __entry->op, xen_hypercall_name(__entry->op),
		      __entry->args,
		      __entry->res == XEN_MC_XE_OK ? "OK" :
		      __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
		      __entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
	);
0131
0132 TRACE_DEFINE_SIZEOF(pteval_t);
0133
/*
 * PTE write events: records the target pte pointer and the raw pte
 * value being written; the printk shows both the decoded value
 * (pte_val of a reconstructed native pte) and the raw bits, each
 * zero-padded to the full width of pteval_t.
 */
DECLARE_EVENT_CLASS(xen_mmu__set_pte,
	    TP_PROTO(pte_t *ptep, pte_t pteval),
	    TP_ARGS(ptep, pteval),
	    TP_STRUCT__entry(
		    __field(pte_t *, ptep)
		    __field(pteval_t, pteval)
		    ),
	    TP_fast_assign(__entry->ptep = ptep;
			   __entry->pteval = pteval.pte),
	    TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
		      __entry->ptep,
		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
	);

#define DEFINE_XEN_MMU_SET_PTE(name)				\
	DEFINE_EVENT(xen_mmu__set_pte, name,			\
		     TP_PROTO(pte_t *ptep, pte_t pteval),	\
		     TP_ARGS(ptep, pteval))

DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
0155
0156 TRACE_DEFINE_SIZEOF(pmdval_t);
0157
/*
 * Records a PMD write: target pmd pointer plus the raw pmd value, with
 * the printk showing both decoded and raw forms (same scheme as the
 * xen_mmu__set_pte class above... the pte-class comment applies here too).
 */
TRACE_EVENT(xen_mmu_set_pmd,
	    TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
	    TP_ARGS(pmdp, pmdval),
	    TP_STRUCT__entry(
		    __field(pmd_t *, pmdp)
		    __field(pmdval_t, pmdval)
		    ),
	    TP_fast_assign(__entry->pmdp = pmdp;
			   __entry->pmdval = pmdval.pmd),
	    TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
		      __entry->pmdp,
		      (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
		      (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
	);
0172
0173 #ifdef CONFIG_X86_PAE
0174 DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
0175
/*
 * PAE-only: records a pte_clear — the mm, the virtual address, and the
 * pte slot being cleared (no pte value is captured).
 */
TRACE_EVENT(xen_mmu_pte_clear,
	    TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
	    TP_ARGS(mm, addr, ptep),
	    TP_STRUCT__entry(
		    __field(struct mm_struct *, mm)
		    __field(unsigned long, addr)
		    __field(pte_t *, ptep)
		    ),
	    TP_fast_assign(__entry->mm = mm;
			   __entry->addr = addr;
			   __entry->ptep = ptep),
	    TP_printk("mm %p addr %lx ptep %p",
		      __entry->mm, __entry->addr, __entry->ptep)
	);
0190
/*
 * PAE-only: records the pmd slot being cleared.
 */
TRACE_EVENT(xen_mmu_pmd_clear,
	    TP_PROTO(pmd_t *pmdp),
	    TP_ARGS(pmdp),
	    TP_STRUCT__entry(
		    __field(pmd_t *, pmdp)
		    ),
	    TP_fast_assign(__entry->pmdp = pmdp),
	    TP_printk("pmdp %p", __entry->pmdp)
	);
0200 #endif
0201
0202 #if CONFIG_PGTABLE_LEVELS >= 4
0203
0204 TRACE_DEFINE_SIZEOF(pudval_t);
0205
/*
 * 4+-level page tables: records a PUD write — target pointer plus the
 * native (raw) pud value; printk shows decoded and raw forms.
 */
TRACE_EVENT(xen_mmu_set_pud,
	    TP_PROTO(pud_t *pudp, pud_t pudval),
	    TP_ARGS(pudp, pudval),
	    TP_STRUCT__entry(
		    __field(pud_t *, pudp)
		    __field(pudval_t, pudval)
		    ),
	    TP_fast_assign(__entry->pudp = pudp;
			   __entry->pudval = native_pud_val(pudval)),
	    TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
		      __entry->pudp,
		      (int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
		      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
	);
0220
0221 TRACE_DEFINE_SIZEOF(p4dval_t);
0222
/*
 * Records a P4D write: the kernel p4d slot, its user-space shadow
 * slot, and the p4d value.
 */
TRACE_EVENT(xen_mmu_set_p4d,
	    TP_PROTO(p4d_t *p4dp, p4d_t *user_p4dp, p4d_t p4dval),
	    TP_ARGS(p4dp, user_p4dp, p4dval),
	    TP_STRUCT__entry(
		    __field(p4d_t *, p4dp)
		    __field(p4d_t *, user_p4dp)
		    __field(p4dval_t, p4dval)
		    ),
	    TP_fast_assign(__entry->p4dp = p4dp;
			   __entry->user_p4dp = user_p4dp;
			   __entry->p4dval = p4d_val(p4dval)),
	    /*
	     * NOTE(review): the decoded form goes through pgd_val/
	     * native_make_pgd rather than p4d helpers — presumably because
	     * p4d folds into pgd at this level; confirm.
	     */
	    TP_printk("p4dp %p user_p4dp %p p4dval %0*llx (raw %0*llx)",
		      __entry->p4dp, __entry->user_p4dp,
		      (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)),
		      (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval)
	);
0239 #else
0240
/*
 * <4-level page tables: same PUD-write event as the 4+-level variant
 * above, but decoded via pgd helpers.
 */
TRACE_EVENT(xen_mmu_set_pud,
	    TP_PROTO(pud_t *pudp, pud_t pudval),
	    TP_ARGS(pudp, pudval),
	    TP_STRUCT__entry(
		    __field(pud_t *, pudp)
		    __field(pudval_t, pudval)
		    ),
	    TP_fast_assign(__entry->pudp = pudp;
			   __entry->pudval = native_pud_val(pudval)),
	    /*
	     * NOTE(review): decoding uses pgd_val/native_make_pgd here —
	     * presumably because the pud folds into the pgd when there are
	     * fewer than 4 paging levels; confirm.
	     */
	    TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
		      __entry->pudp,
		      (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
		      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
	);
0255
0256 #endif
0257
/*
 * ptep_modify_prot start/commit pair: records mm, virtual address, the
 * pte slot, and the raw pte value involved in the prot-modify sequence.
 */
DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pteval),
	    TP_ARGS(mm, addr, ptep, pteval),
	    TP_STRUCT__entry(
		    __field(struct mm_struct *, mm)
		    __field(unsigned long, addr)
		    __field(pte_t *, ptep)
		    __field(pteval_t, pteval)
		    ),
	    TP_fast_assign(__entry->mm = mm;
			   __entry->addr = addr;
			   __entry->ptep = ptep;
			   __entry->pteval = pteval.pte),
	    TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
		      __entry->mm, __entry->addr, __entry->ptep,
		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
	);
#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name)				\
	DEFINE_EVENT(xen_mmu_ptep_modify_prot, name,			\
		     TP_PROTO(struct mm_struct *mm, unsigned long addr,	\
			      pte_t *ptep, pte_t pteval),		\
		     TP_ARGS(mm, addr, ptep, pteval))

DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start);
DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit);
0285
/*
 * Records allocation of a page-table page: owning mm, the page's pfn,
 * its level in the paging hierarchy, and whether it is pinned.
 */
TRACE_EVENT(xen_mmu_alloc_ptpage,
	    TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
	    TP_ARGS(mm, pfn, level, pinned),
	    TP_STRUCT__entry(
		    __field(struct mm_struct *, mm)
		    __field(unsigned long, pfn)
		    __field(unsigned, level)
		    __field(bool, pinned)
		    ),
	    TP_fast_assign(__entry->mm = mm;
			   __entry->pfn = pfn;
			   __entry->level = level;
			   __entry->pinned = pinned),
	    /* "%spinned" renders as "pinned" or "unpinned". */
	    TP_printk("mm %p pfn %lx level %d %spinned",
		      __entry->mm, __entry->pfn, __entry->level,
		      __entry->pinned ? "" : "un")
	);
0303
/*
 * Records release of a page-table page: pfn, hierarchy level, and
 * whether it was pinned (counterpart to xen_mmu_alloc_ptpage, minus mm).
 */
TRACE_EVENT(xen_mmu_release_ptpage,
	    TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
	    TP_ARGS(pfn, level, pinned),
	    TP_STRUCT__entry(
		    __field(unsigned long, pfn)
		    __field(unsigned, level)
		    __field(bool, pinned)
		    ),
	    TP_fast_assign(__entry->pfn = pfn;
			   __entry->level = level;
			   __entry->pinned = pinned),
	    TP_printk("pfn %lx level %d %spinned",
		      __entry->pfn, __entry->level,
		      __entry->pinned ? "" : "un")
	);
0319
/*
 * PGD pin/unpin events: records the mm and the pgd pointer for
 * xen_mmu_pgd_pin and xen_mmu_pgd_unpin.
 */
DECLARE_EVENT_CLASS(xen_mmu_pgd,
	    TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
	    TP_ARGS(mm, pgd),
	    TP_STRUCT__entry(
		    __field(struct mm_struct *, mm)
		    __field(pgd_t *, pgd)
		    ),
	    TP_fast_assign(__entry->mm = mm;
			   __entry->pgd = pgd),
	    TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
	);
#define DEFINE_XEN_MMU_PGD_EVENT(name)				\
	DEFINE_EVENT(xen_mmu_pgd, name,				\
		     TP_PROTO(struct mm_struct *mm, pgd_t *pgd),	\
		     TP_ARGS(mm, pgd))

DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
0338
/*
 * Records a single-address user TLB flush: the virtual address only.
 */
TRACE_EVENT(xen_mmu_flush_tlb_one_user,
	    TP_PROTO(unsigned long addr),
	    TP_ARGS(addr),
	    TP_STRUCT__entry(
		    __field(unsigned long, addr)
		    ),
	    TP_fast_assign(__entry->addr = addr),
	    TP_printk("addr %lx", __entry->addr)
	);
0348
0349 TRACE_EVENT(xen_mmu_flush_tlb_multi,
0350 TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
0351 unsigned long addr, unsigned long end),
0352 TP_ARGS(cpus, mm, addr, end),
0353 TP_STRUCT__entry(
0354 __field(unsigned, ncpus)
0355 __field(struct mm_struct *, mm)
0356 __field(unsigned long, addr)
0357 __field(unsigned long, end)
0358 ),
0359 TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
0360 __entry->mm = mm;
0361 __entry->addr = addr,
0362 __entry->end = end),
0363 TP_printk("ncpus %d mm %p addr %lx, end %lx",
0364 __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
0365 );
0366
/*
 * Records a CR3 write: whether it targets the kernel or user page
 * table (printed as "kernel"/"user") and the new cr3 value.
 */
TRACE_EVENT(xen_mmu_write_cr3,
	    TP_PROTO(bool kernel, unsigned long cr3),
	    TP_ARGS(kernel, cr3),
	    TP_STRUCT__entry(
		    __field(bool, kernel)
		    __field(unsigned long, cr3)
		    ),
	    TP_fast_assign(__entry->kernel = kernel;
			   __entry->cr3 = cr3),
	    TP_printk("%s cr3 %lx",
		      __entry->kernel ? "kernel" : "user", __entry->cr3)
	);
0379
0380
0381
/*
 * Records an LDT entry write: the descriptor table pointer, the entry
 * index, and the 64-bit descriptor value being written.
 */
TRACE_EVENT(xen_cpu_write_ldt_entry,
	    TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc),
	    TP_ARGS(dt, entrynum, desc),
	    TP_STRUCT__entry(
		    __field(struct desc_struct *, dt)
		    __field(int, entrynum)
		    __field(u64, desc)
		    ),
	    TP_fast_assign(__entry->dt = dt;
			   __entry->entrynum = entrynum;
			   __entry->desc = desc;
		    ),
	    TP_printk("dt %p entrynum %d entry %016llx",
		      __entry->dt, __entry->entrynum,
		      (unsigned long long)__entry->desc)
	);
0398
/*
 * Records an IDT entry write: table pointer and entry index only —
 * the gate descriptor contents (ent) are accepted but not recorded.
 */
TRACE_EVENT(xen_cpu_write_idt_entry,
	    TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent),
	    TP_ARGS(dt, entrynum, ent),
	    TP_STRUCT__entry(
		    __field(gate_desc *, dt)
		    __field(int, entrynum)
		    ),
	    TP_fast_assign(__entry->dt = dt;
			   __entry->entrynum = entrynum;
		    ),
	    TP_printk("dt %p entrynum %d",
		      __entry->dt, __entry->entrynum)
	);
0412
/*
 * Records an IDT load: only the base address from the descriptor
 * pointer is captured (not the limit).
 */
TRACE_EVENT(xen_cpu_load_idt,
	    TP_PROTO(const struct desc_ptr *desc),
	    TP_ARGS(desc),
	    TP_STRUCT__entry(
		    __field(unsigned long, addr)
		    ),
	    TP_fast_assign(__entry->addr = desc->address),
	    TP_printk("addr %lx", __entry->addr)
	);
0422
/*
 * Records a GDT entry write: table pointer, entry index, entry type,
 * and the raw 64-bit descriptor (read from the opaque desc pointer).
 */
TRACE_EVENT(xen_cpu_write_gdt_entry,
	    TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type),
	    TP_ARGS(dt, entrynum, desc, type),
	    TP_STRUCT__entry(
		    __field(u64, desc)
		    __field(struct desc_struct *, dt)
		    __field(int, entrynum)
		    __field(int, type)
		    ),
	    TP_fast_assign(__entry->dt = dt;
			   __entry->entrynum = entrynum;
			   /* desc points at (at least) 8 bytes of descriptor data. */
			   __entry->desc = *(u64 *)desc;
			   __entry->type = type;
		    ),
	    TP_printk("dt %p entrynum %d type %d desc %016llx",
		      __entry->dt, __entry->entrynum, __entry->type,
		      (unsigned long long)__entry->desc)
	);
0441
/*
 * Records an LDT base switch: the new table address and its entry count.
 */
TRACE_EVENT(xen_cpu_set_ldt,
	    TP_PROTO(const void *addr, unsigned entries),
	    TP_ARGS(addr, entries),
	    TP_STRUCT__entry(
		    __field(const void *, addr)
		    __field(unsigned, entries)
		    ),
	    TP_fast_assign(__entry->addr = addr;
			   __entry->entries = entries),
	    TP_printk("addr %p entries %u",
		      __entry->addr, __entry->entries)
	);
0454
0455
0456 #endif
0457
0458
0459 #include <trace/define_trace.h>