0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 
0003 #undef TRACE_SYSTEM
0004 #define TRACE_SYSTEM i915
0005 
0006 #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
0007 #define _I915_TRACE_H_
0008 
0009 #include <linux/stringify.h>
0010 #include <linux/types.h>
0011 #include <linux/tracepoint.h>
0012 
0013 #include <drm/drm_drv.h>
0014 
0015 #include "gt/intel_engine.h"
0016 
0017 #include "i915_drv.h"
0018 #include "i915_irq.h"
0019 
0020 /* object tracking */
0021 
0022 TRACE_EVENT(i915_gem_object_create,
0023         TP_PROTO(struct drm_i915_gem_object *obj),
0024         TP_ARGS(obj),
0025 
0026         TP_STRUCT__entry(
0027                  __field(struct drm_i915_gem_object *, obj)
0028                  __field(u64, size)
0029                  ),
0030 
0031         TP_fast_assign(
0032                __entry->obj = obj;
0033                __entry->size = obj->base.size;
0034                ),
0035 
0036         TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
0037 );
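/*
 * Usage sketch (illustrative): each TRACE_EVENT(name, ...) in this header
 * declares an event under the "i915" TRACE_SYSTEM and generates a
 * trace_<name>() helper that the driver calls at the instrumented point.
 * TP_STRUCT__entry() describes the recorded fields, TP_fast_assign()
 * fills them in when the event fires, and TP_printk() formats the
 * human-readable output. A call site looks roughly like:
 *
 *	trace_i915_gem_object_create(obj);
 */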
0038 
0039 TRACE_EVENT(i915_gem_shrink,
0040         TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
0041         TP_ARGS(i915, target, flags),
0042 
0043         TP_STRUCT__entry(
0044                  __field(int, dev)
0045                  __field(unsigned long, target)
0046                  __field(unsigned, flags)
0047                  ),
0048 
0049         TP_fast_assign(
0050                __entry->dev = i915->drm.primary->index;
0051                __entry->target = target;
0052                __entry->flags = flags;
0053                ),
0054 
0055         TP_printk("dev=%d, target=%lu, flags=%x",
0056               __entry->dev, __entry->target, __entry->flags)
0057 );
0058 
0059 TRACE_EVENT(i915_vma_bind,
0060         TP_PROTO(struct i915_vma *vma, unsigned flags),
0061         TP_ARGS(vma, flags),
0062 
0063         TP_STRUCT__entry(
0064                  __field(struct drm_i915_gem_object *, obj)
0065                  __field(struct i915_address_space *, vm)
0066                  __field(u64, offset)
0067                  __field(u64, size)
0068                  __field(unsigned, flags)
0069                  ),
0070 
0071         TP_fast_assign(
0072                __entry->obj = vma->obj;
0073                __entry->vm = vma->vm;
0074                __entry->offset = vma->node.start;
0075                __entry->size = vma->node.size;
0076                __entry->flags = flags;
0077                ),
0078 
0079         TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
0080               __entry->obj, __entry->offset, __entry->size,
0081               __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
0082               __entry->vm)
0083 );
0084 
0085 TRACE_EVENT(i915_vma_unbind,
0086         TP_PROTO(struct i915_vma *vma),
0087         TP_ARGS(vma),
0088 
0089         TP_STRUCT__entry(
0090                  __field(struct drm_i915_gem_object *, obj)
0091                  __field(struct i915_address_space *, vm)
0092                  __field(u64, offset)
0093                  __field(u64, size)
0094                  ),
0095 
0096         TP_fast_assign(
0097                __entry->obj = vma->obj;
0098                __entry->vm = vma->vm;
0099                __entry->offset = vma->node.start;
0100                __entry->size = vma->node.size;
0101                ),
0102 
0103         TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
0104               __entry->obj, __entry->offset, __entry->size, __entry->vm)
0105 );
0106 
0107 TRACE_EVENT(i915_gem_object_pwrite,
0108         TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
0109         TP_ARGS(obj, offset, len),
0110 
0111         TP_STRUCT__entry(
0112                  __field(struct drm_i915_gem_object *, obj)
0113                  __field(u64, offset)
0114                  __field(u64, len)
0115                  ),
0116 
0117         TP_fast_assign(
0118                __entry->obj = obj;
0119                __entry->offset = offset;
0120                __entry->len = len;
0121                ),
0122 
0123         TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
0124               __entry->obj, __entry->offset, __entry->len)
0125 );
0126 
0127 TRACE_EVENT(i915_gem_object_pread,
0128         TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
0129         TP_ARGS(obj, offset, len),
0130 
0131         TP_STRUCT__entry(
0132                  __field(struct drm_i915_gem_object *, obj)
0133                  __field(u64, offset)
0134                  __field(u64, len)
0135                  ),
0136 
0137         TP_fast_assign(
0138                __entry->obj = obj;
0139                __entry->offset = offset;
0140                __entry->len = len;
0141                ),
0142 
0143         TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
0144               __entry->obj, __entry->offset, __entry->len)
0145 );
0146 
0147 TRACE_EVENT(i915_gem_object_fault,
0148         TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
0149         TP_ARGS(obj, index, gtt, write),
0150 
0151         TP_STRUCT__entry(
0152                  __field(struct drm_i915_gem_object *, obj)
0153                  __field(u64, index)
0154                  __field(bool, gtt)
0155                  __field(bool, write)
0156                  ),
0157 
0158         TP_fast_assign(
0159                __entry->obj = obj;
0160                __entry->index = index;
0161                __entry->gtt = gtt;
0162                __entry->write = write;
0163                ),
0164 
0165         TP_printk("obj=%p, %s index=%llu %s",
0166               __entry->obj,
0167               __entry->gtt ? "GTT" : "CPU",
0168               __entry->index,
0169               __entry->write ? ", writable" : "")
0170 );
0171 
0172 DECLARE_EVENT_CLASS(i915_gem_object,
0173         TP_PROTO(struct drm_i915_gem_object *obj),
0174         TP_ARGS(obj),
0175 
0176         TP_STRUCT__entry(
0177                  __field(struct drm_i915_gem_object *, obj)
0178                  ),
0179 
0180         TP_fast_assign(
0181                __entry->obj = obj;
0182                ),
0183 
0184         TP_printk("obj=%p", __entry->obj)
0185 );
0186 
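/*
 * DECLARE_EVENT_CLASS(i915_gem_object) above defines the record layout and
 * formatting once; the DEFINE_EVENT() instances that follow attach named
 * tracepoints to that shared class, avoiding duplicated
 * TP_STRUCT__entry/TP_fast_assign blocks.
 */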
0187 DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
0188          TP_PROTO(struct drm_i915_gem_object *obj),
0189          TP_ARGS(obj)
0190 );
0191 
0192 DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
0193         TP_PROTO(struct drm_i915_gem_object *obj),
0194         TP_ARGS(obj)
0195 );
0196 
0197 TRACE_EVENT(i915_gem_evict,
0198         TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
0199         TP_ARGS(vm, size, align, flags),
0200 
0201         TP_STRUCT__entry(
0202                  __field(u32, dev)
0203                  __field(struct i915_address_space *, vm)
0204                  __field(u64, size)
0205                  __field(u64, align)
0206                  __field(unsigned int, flags)
0207                 ),
0208 
0209         TP_fast_assign(
0210                __entry->dev = vm->i915->drm.primary->index;
0211                __entry->vm = vm;
0212                __entry->size = size;
0213                __entry->align = align;
0214                __entry->flags = flags;
0215               ),
0216 
0217         TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
0218               __entry->dev, __entry->vm, __entry->size, __entry->align,
0219               __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
0220 );
0221 
0222 TRACE_EVENT(i915_gem_evict_node,
0223         TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
0224         TP_ARGS(vm, node, flags),
0225 
0226         TP_STRUCT__entry(
0227                  __field(u32, dev)
0228                  __field(struct i915_address_space *, vm)
0229                  __field(u64, start)
0230                  __field(u64, size)
0231                  __field(unsigned long, color)
0232                  __field(unsigned int, flags)
0233                 ),
0234 
0235         TP_fast_assign(
0236                __entry->dev = vm->i915->drm.primary->index;
0237                __entry->vm = vm;
0238                __entry->start = node->start;
0239                __entry->size = node->size;
0240                __entry->color = node->color;
0241                __entry->flags = flags;
0242               ),
0243 
0244         TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
0245               __entry->dev, __entry->vm,
0246               __entry->start, __entry->size,
0247               __entry->color, __entry->flags)
0248 );
0249 
0250 TRACE_EVENT(i915_gem_evict_vm,
0251         TP_PROTO(struct i915_address_space *vm),
0252         TP_ARGS(vm),
0253 
0254         TP_STRUCT__entry(
0255                  __field(u32, dev)
0256                  __field(struct i915_address_space *, vm)
0257                 ),
0258 
0259         TP_fast_assign(
0260                __entry->dev = vm->i915->drm.primary->index;
0261                __entry->vm = vm;
0262               ),
0263 
0264         TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
0265 );
0266 
0267 TRACE_EVENT(i915_request_queue,
0268         TP_PROTO(struct i915_request *rq, u32 flags),
0269         TP_ARGS(rq, flags),
0270 
0271         TP_STRUCT__entry(
0272                  __field(u32, dev)
0273                  __field(u64, ctx)
0274                  __field(u16, class)
0275                  __field(u16, instance)
0276                  __field(u32, seqno)
0277                  __field(u32, flags)
0278                  ),
0279 
0280         TP_fast_assign(
0281                __entry->dev = rq->engine->i915->drm.primary->index;
0282                __entry->class = rq->engine->uabi_class;
0283                __entry->instance = rq->engine->uabi_instance;
0284                __entry->ctx = rq->fence.context;
0285                __entry->seqno = rq->fence.seqno;
0286                __entry->flags = flags;
0287                ),
0288 
0289         TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
0290               __entry->dev, __entry->class, __entry->instance,
0291               __entry->ctx, __entry->seqno, __entry->flags)
0292 );
0293 
0294 DECLARE_EVENT_CLASS(i915_request,
0295         TP_PROTO(struct i915_request *rq),
0296         TP_ARGS(rq),
0297 
0298         TP_STRUCT__entry(
0299                  __field(u32, dev)
0300                  __field(u64, ctx)
0301                  __field(u16, class)
0302                  __field(u16, instance)
0303                  __field(u32, seqno)
0304                  __field(u32, tail)
0305                  ),
0306 
0307         TP_fast_assign(
0308                __entry->dev = rq->engine->i915->drm.primary->index;
0309                __entry->class = rq->engine->uabi_class;
0310                __entry->instance = rq->engine->uabi_instance;
0311                __entry->ctx = rq->fence.context;
0312                __entry->seqno = rq->fence.seqno;
0313                __entry->tail = rq->tail;
0314                ),
0315 
0316         TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
0317               __entry->dev, __entry->class, __entry->instance,
0318               __entry->ctx, __entry->seqno, __entry->tail)
0319 );
0320 
0321 DEFINE_EVENT(i915_request, i915_request_add,
0322          TP_PROTO(struct i915_request *rq),
0323          TP_ARGS(rq)
0324 );
0325 
0326 #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
0327 DEFINE_EVENT(i915_request, i915_request_guc_submit,
0328          TP_PROTO(struct i915_request *rq),
0329          TP_ARGS(rq)
0330 );
0331 
0332 DEFINE_EVENT(i915_request, i915_request_submit,
0333          TP_PROTO(struct i915_request *rq),
0334          TP_ARGS(rq)
0335 );
0336 
0337 DEFINE_EVENT(i915_request, i915_request_execute,
0338          TP_PROTO(struct i915_request *rq),
0339          TP_ARGS(rq)
0340 );
0341 
0342 TRACE_EVENT(i915_request_in,
0343         TP_PROTO(struct i915_request *rq, unsigned int port),
0344         TP_ARGS(rq, port),
0345 
0346         TP_STRUCT__entry(
0347                  __field(u32, dev)
0348                  __field(u64, ctx)
0349                  __field(u16, class)
0350                  __field(u16, instance)
0351                  __field(u32, seqno)
0352                  __field(u32, port)
0353                  __field(s32, prio)
0354                 ),
0355 
0356         TP_fast_assign(
0357                __entry->dev = rq->engine->i915->drm.primary->index;
0358                __entry->class = rq->engine->uabi_class;
0359                __entry->instance = rq->engine->uabi_instance;
0360                __entry->ctx = rq->fence.context;
0361                __entry->seqno = rq->fence.seqno;
0362                __entry->prio = rq->sched.attr.priority;
0363                __entry->port = port;
0364                ),
0365 
0366         TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
0367               __entry->dev, __entry->class, __entry->instance,
0368               __entry->ctx, __entry->seqno,
0369               __entry->prio, __entry->port)
0370 );
0371 
0372 TRACE_EVENT(i915_request_out,
0373         TP_PROTO(struct i915_request *rq),
0374         TP_ARGS(rq),
0375 
0376         TP_STRUCT__entry(
0377                  __field(u32, dev)
0378                  __field(u64, ctx)
0379                  __field(u16, class)
0380                  __field(u16, instance)
0381                  __field(u32, seqno)
0382                  __field(u32, completed)
0383                 ),
0384 
0385         TP_fast_assign(
0386                __entry->dev = rq->engine->i915->drm.primary->index;
0387                __entry->class = rq->engine->uabi_class;
0388                __entry->instance = rq->engine->uabi_instance;
0389                __entry->ctx = rq->fence.context;
0390                __entry->seqno = rq->fence.seqno;
0391                __entry->completed = i915_request_completed(rq);
0392                ),
0393 
0394             TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
0395                   __entry->dev, __entry->class, __entry->instance,
0396                   __entry->ctx, __entry->seqno, __entry->completed)
0397 );
0398 
0399 DECLARE_EVENT_CLASS(intel_context,
0400             TP_PROTO(struct intel_context *ce),
0401             TP_ARGS(ce),
0402 
0403             TP_STRUCT__entry(
0404                  __field(u32, guc_id)
0405                  __field(int, pin_count)
0406                  __field(u32, sched_state)
0407                  __field(u8, guc_prio)
0408                  ),
0409 
0410             TP_fast_assign(
0411                __entry->guc_id = ce->guc_id.id;
0412                __entry->pin_count = atomic_read(&ce->pin_count);
0413                __entry->sched_state = ce->guc_state.sched_state;
0414                __entry->guc_prio = ce->guc_state.prio;
0415                ),
0416 
0417             TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
0418                   __entry->guc_id, __entry->pin_count,
0419                   __entry->sched_state,
0420                   __entry->guc_prio)
0421 );
0422 
0423 DEFINE_EVENT(intel_context, intel_context_set_prio,
0424          TP_PROTO(struct intel_context *ce),
0425          TP_ARGS(ce)
0426 );
0427 
0428 DEFINE_EVENT(intel_context, intel_context_reset,
0429          TP_PROTO(struct intel_context *ce),
0430          TP_ARGS(ce)
0431 );
0432 
0433 DEFINE_EVENT(intel_context, intel_context_ban,
0434          TP_PROTO(struct intel_context *ce),
0435          TP_ARGS(ce)
0436 );
0437 
0438 DEFINE_EVENT(intel_context, intel_context_register,
0439          TP_PROTO(struct intel_context *ce),
0440          TP_ARGS(ce)
0441 );
0442 
0443 DEFINE_EVENT(intel_context, intel_context_deregister,
0444          TP_PROTO(struct intel_context *ce),
0445          TP_ARGS(ce)
0446 );
0447 
0448 DEFINE_EVENT(intel_context, intel_context_deregister_done,
0449          TP_PROTO(struct intel_context *ce),
0450          TP_ARGS(ce)
0451 );
0452 
0453 DEFINE_EVENT(intel_context, intel_context_sched_enable,
0454          TP_PROTO(struct intel_context *ce),
0455          TP_ARGS(ce)
0456 );
0457 
0458 DEFINE_EVENT(intel_context, intel_context_sched_disable,
0459          TP_PROTO(struct intel_context *ce),
0460          TP_ARGS(ce)
0461 );
0462 
0463 DEFINE_EVENT(intel_context, intel_context_sched_done,
0464          TP_PROTO(struct intel_context *ce),
0465          TP_ARGS(ce)
0466 );
0467 
0468 DEFINE_EVENT(intel_context, intel_context_create,
0469          TP_PROTO(struct intel_context *ce),
0470          TP_ARGS(ce)
0471 );
0472 
0473 DEFINE_EVENT(intel_context, intel_context_fence_release,
0474          TP_PROTO(struct intel_context *ce),
0475          TP_ARGS(ce)
0476 );
0477 
0478 DEFINE_EVENT(intel_context, intel_context_free,
0479          TP_PROTO(struct intel_context *ce),
0480          TP_ARGS(ce)
0481 );
0482 
0483 DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
0484          TP_PROTO(struct intel_context *ce),
0485          TP_ARGS(ce)
0486 );
0487 
0488 DEFINE_EVENT(intel_context, intel_context_do_pin,
0489          TP_PROTO(struct intel_context *ce),
0490          TP_ARGS(ce)
0491 );
0492 
0493 DEFINE_EVENT(intel_context, intel_context_do_unpin,
0494          TP_PROTO(struct intel_context *ce),
0495          TP_ARGS(ce)
0496 );
0497 
0498 #else
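/*
 * Low-level tracepoints disabled: provide empty inline stubs so that
 * callers of these trace_*() helpers compile unchanged.
 */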
0499 #if !defined(TRACE_HEADER_MULTI_READ)
0500 static inline void
0501 trace_i915_request_guc_submit(struct i915_request *rq)
0502 {
0503 }
0504 
0505 static inline void
0506 trace_i915_request_submit(struct i915_request *rq)
0507 {
0508 }
0509 
0510 static inline void
0511 trace_i915_request_execute(struct i915_request *rq)
0512 {
0513 }
0514 
0515 static inline void
0516 trace_i915_request_in(struct i915_request *rq, unsigned int port)
0517 {
0518 }
0519 
0520 static inline void
0521 trace_i915_request_out(struct i915_request *rq)
0522 {
0523 }
0524 
0525 static inline void
0526 trace_intel_context_set_prio(struct intel_context *ce)
0527 {
0528 }
0529 
0530 static inline void
0531 trace_intel_context_reset(struct intel_context *ce)
0532 {
0533 }
0534 
0535 static inline void
0536 trace_intel_context_ban(struct intel_context *ce)
0537 {
0538 }
0539 
0540 static inline void
0541 trace_intel_context_register(struct intel_context *ce)
0542 {
0543 }
0544 
0545 static inline void
0546 trace_intel_context_deregister(struct intel_context *ce)
0547 {
0548 }
0549 
0550 static inline void
0551 trace_intel_context_deregister_done(struct intel_context *ce)
0552 {
0553 }
0554 
0555 static inline void
0556 trace_intel_context_sched_enable(struct intel_context *ce)
0557 {
0558 }
0559 
0560 static inline void
0561 trace_intel_context_sched_disable(struct intel_context *ce)
0562 {
0563 }
0564 
0565 static inline void
0566 trace_intel_context_sched_done(struct intel_context *ce)
0567 {
0568 }
0569 
0570 static inline void
0571 trace_intel_context_create(struct intel_context *ce)
0572 {
0573 }
0574 
0575 static inline void
0576 trace_intel_context_fence_release(struct intel_context *ce)
0577 {
0578 }
0579 
0580 static inline void
0581 trace_intel_context_free(struct intel_context *ce)
0582 {
0583 }
0584 
0585 static inline void
0586 trace_intel_context_steal_guc_id(struct intel_context *ce)
0587 {
0588 }
0589 
0590 static inline void
0591 trace_intel_context_do_pin(struct intel_context *ce)
0592 {
0593 }
0594 
0595 static inline void
0596 trace_intel_context_do_unpin(struct intel_context *ce)
0597 {
0598 }
0599 #endif
0600 #endif
0601 
0602 DEFINE_EVENT(i915_request, i915_request_retire,
0603         TP_PROTO(struct i915_request *rq),
0604         TP_ARGS(rq)
0605 );
0606 
0607 TRACE_EVENT(i915_request_wait_begin,
0608         TP_PROTO(struct i915_request *rq, unsigned int flags),
0609         TP_ARGS(rq, flags),
0610 
0611         TP_STRUCT__entry(
0612                  __field(u32, dev)
0613                  __field(u64, ctx)
0614                  __field(u16, class)
0615                  __field(u16, instance)
0616                  __field(u32, seqno)
0617                  __field(unsigned int, flags)
0618                  ),
0619 
0620         /* NB: the blocking information is racy since mutex_is_locked
0621          * doesn't check that the current thread holds the lock. The only
0622          * other option would be to pass the boolean information of whether
0623          * or not the class was blocking down through the stack which is
0624          * less desirable.
0625          */
0626         TP_fast_assign(
0627                __entry->dev = rq->engine->i915->drm.primary->index;
0628                __entry->class = rq->engine->uabi_class;
0629                __entry->instance = rq->engine->uabi_instance;
0630                __entry->ctx = rq->fence.context;
0631                __entry->seqno = rq->fence.seqno;
0632                __entry->flags = flags;
0633                ),
0634 
0635         TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
0636               __entry->dev, __entry->class, __entry->instance,
0637               __entry->ctx, __entry->seqno,
0638               __entry->flags)
0639 );
0640 
0641 DEFINE_EVENT(i915_request, i915_request_wait_end,
0642         TP_PROTO(struct i915_request *rq),
0643         TP_ARGS(rq)
0644 );
0645 
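/*
 * TRACE_EVENT_CONDITION() records the event only when TP_CONDITION()
 * evaluates true, so register accesses below are traced only when the
 * caller passes trace == true.
 */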
0646 TRACE_EVENT_CONDITION(i915_reg_rw,
0647     TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
0648 
0649     TP_ARGS(write, reg, val, len, trace),
0650 
0651     TP_CONDITION(trace),
0652 
0653     TP_STRUCT__entry(
0654         __field(u64, val)
0655         __field(u32, reg)
0656         __field(u16, write)
0657         __field(u16, len)
0658         ),
0659 
0660     TP_fast_assign(
0661         __entry->val = (u64)val;
0662         __entry->reg = i915_mmio_reg_offset(reg);
0663         __entry->write = write;
0664         __entry->len = len;
0665         ),
0666 
0667     TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
0668         __entry->write ? "write" : "read",
0669         __entry->reg, __entry->len,
0670         (u32)(__entry->val & 0xffffffff),
0671         (u32)(__entry->val >> 32))
0672 );
0673 
0674 TRACE_EVENT(intel_gpu_freq_change,
0675         TP_PROTO(u32 freq),
0676         TP_ARGS(freq),
0677 
0678         TP_STRUCT__entry(
0679                  __field(u32, freq)
0680                  ),
0681 
0682         TP_fast_assign(
0683                __entry->freq = freq;
0684                ),
0685 
0686         TP_printk("new_freq=%u", __entry->freq)
0687 );
0688 
0689 /**
0690  * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
0691  *
0692  * With full ppgtt enabled each process using drm will allocate at least one
0693  * translation table. With these traces it is possible to keep track of the
0694  * allocation and of the lifetime of the tables; this can be used during
0695  * testing/debug to verify that we are not leaking ppgtts.
0696  * These traces identify the ppgtt through the vm pointer, which is also printed
0697  * by the i915_vma_bind and i915_vma_unbind tracepoints.
0698  */
0699 DECLARE_EVENT_CLASS(i915_ppgtt,
0700     TP_PROTO(struct i915_address_space *vm),
0701     TP_ARGS(vm),
0702 
0703     TP_STRUCT__entry(
0704             __field(struct i915_address_space *, vm)
0705             __field(u32, dev)
0706     ),
0707 
0708     TP_fast_assign(
0709             __entry->vm = vm;
0710             __entry->dev = vm->i915->drm.primary->index;
0711     ),
0712 
0713     TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
0714 )
0715 
0716 DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
0717     TP_PROTO(struct i915_address_space *vm),
0718     TP_ARGS(vm)
0719 );
0720 
0721 DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
0722     TP_PROTO(struct i915_address_space *vm),
0723     TP_ARGS(vm)
0724 );
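/*
 * Illustrative call pattern (a sketch, not a verbatim call site): the
 * ppgtt allocation path emits the create event once the address space is
 * set up, and the release path emits the matching event before teardown,
 * roughly:
 *
 *	trace_i915_ppgtt_create(&ppgtt->vm);
 *	...
 *	trace_i915_ppgtt_release(&ppgtt->vm);
 */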
0725 
0726 /**
0727  * DOC: i915_context_create and i915_context_free tracepoints
0728  *
0729  * These tracepoints are used to track creation and deletion of contexts.
0730  * If full ppgtt is enabled, they also print the address of the vm assigned to
0731  * the context.
0732  */
0733 DECLARE_EVENT_CLASS(i915_context,
0734     TP_PROTO(struct i915_gem_context *ctx),
0735     TP_ARGS(ctx),
0736 
0737     TP_STRUCT__entry(
0738             __field(u32, dev)
0739             __field(struct i915_gem_context *, ctx)
0740             __field(struct i915_address_space *, vm)
0741     ),
0742 
0743     TP_fast_assign(
0744             __entry->dev = ctx->i915->drm.primary->index;
0745             __entry->ctx = ctx;
0746             __entry->vm = ctx->vm;
0747     ),
0748 
0749     TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
0750           __entry->dev, __entry->ctx, __entry->vm)
0751 )
0752 
0753 DEFINE_EVENT(i915_context, i915_context_create,
0754     TP_PROTO(struct i915_gem_context *ctx),
0755     TP_ARGS(ctx)
0756 );
0757 
0758 DEFINE_EVENT(i915_context, i915_context_free,
0759     TP_PROTO(struct i915_gem_context *ctx),
0760     TP_ARGS(ctx)
0761 );
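/*
 * Runtime usage sketch: with tracing enabled, these events appear under
 * the "i915" system in tracefs and can be toggled individually, e.g.
 * (paths assume a default debugfs/tracefs mount):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/i915/i915_context_create/enable
 *	echo 1 > /sys/kernel/debug/tracing/events/i915/i915_context_free/enable
 */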
0762 
0763 #endif /* _I915_TRACE_H_ */
0764 
0765 /* This part must be outside protection */
0766 #undef TRACE_INCLUDE_PATH
0767 #undef TRACE_INCLUDE_FILE
0768 #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
0769 #define TRACE_INCLUDE_FILE i915_trace
0770 #include <trace/define_trace.h>
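/*
 * Build-time usage sketch: exactly one compilation unit in the driver
 * defines CREATE_TRACE_POINTS before including this header, which makes
 * <trace/define_trace.h> emit the actual event definitions, roughly:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "i915_trace.h"
 */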