#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_TX_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>

#include "hfi.h"
#include "mad.h"
#include "sdma.h"
#include "ipoib.h"
#include "user_sdma.h"

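/*
 * Helper that decodes the flag bits of an SDMA descriptor pair into a
 * printable string; __parse_sdma_flags() supplies the per-event trace_seq.
 */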
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)

#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tx

TRACE_EVENT(hfi1_piofree,
	TP_PROTO(struct send_context *sc, int extra),
	TP_ARGS(sc, extra),
	TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
		__field(u32, sw_index)
		__field(u32, hw_context)
		__field(int, extra)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
		__entry->sw_index = sc->sw_index;
		__entry->hw_context = sc->hw_context;
		__entry->extra = extra;
	),
	TP_printk("[%s] ctxt %u(%u) extra %d",
		__get_str(dev),
		__entry->sw_index,
		__entry->hw_context,
		__entry->extra
	)
);

TRACE_EVENT(hfi1_wantpiointr,
	TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
	TP_ARGS(sc, needint, credit_ctrl),
	TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
		__field(u32, sw_index)
		__field(u32, hw_context)
		__field(u32, needint)
		__field(u64, credit_ctrl)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
		__entry->sw_index = sc->sw_index;
		__entry->hw_context = sc->hw_context;
		__entry->needint = needint;
		__entry->credit_ctrl = credit_ctrl;
	),
	TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
		__get_str(dev),
		__entry->sw_index,
		__entry->hw_context,
		__entry->needint,
		(unsigned long long)__entry->credit_ctrl
	)
);

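/* QP sleep/wakeup events: snapshot the QP, hfi1-private and iowait flag words. */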
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
	TP_PROTO(struct rvt_qp *qp, u32 flags),
	TP_ARGS(qp, flags),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, flags)
		__field(u32, s_flags)
		__field(u32, ps_flags)
		__field(unsigned long, iow_flags)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->flags = flags;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->s_flags = qp->s_flags;
		__entry->ps_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_flags;
		__entry->iow_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
	),
	TP_printk(
		"[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
		__get_str(dev),
		__entry->qpn,
		__entry->flags,
		__entry->s_flags,
		__entry->ps_flags,
		__entry->iow_flags
	)
);

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
	TP_PROTO(struct rvt_qp *qp, u32 flags),
	TP_ARGS(qp, flags));

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
	TP_PROTO(struct rvt_qp *qp, u32 flags),
	TP_ARGS(qp, flags));

TRACE_EVENT(hfi1_sdma_descriptor,
	TP_PROTO(struct sdma_engine *sde,
		u64 desc0,
		u64 desc1,
		u16 e,
		void *descp),
	TP_ARGS(sde, desc0, desc1, e, descp),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(void *, descp)
		__field(u64, desc0)
		__field(u64, desc1)
		__field(u16, e)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->desc0 = desc0;
		__entry->desc1 = desc1;
		__entry->idx = sde->this_idx;
		__entry->descp = descp;
		__entry->e = e;
	),
	TP_printk(
		"[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
		__get_str(dev),
		__entry->idx,
		__parse_sdma_flags(__entry->desc0, __entry->desc1),
		(__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
			SDMA_DESC0_PHY_ADDR_MASK,
		(u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
			SDMA_DESC1_GENERATION_MASK),
		(u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
			SDMA_DESC0_BYTE_COUNT_MASK),
		__entry->desc0,
		__entry->desc1,
		__entry->descp,
		__entry->e
	)
);

TRACE_EVENT(hfi1_sdma_engine_select,
	TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
	TP_ARGS(dd, sel, vl, idx),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u32, sel)
		__field(u8, vl)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->sel = sel;
		__entry->vl = vl;
		__entry->idx = idx;
	),
	TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
		__get_str(dev),
		__entry->idx,
		__entry->sel,
		__entry->vl
	)
);

TRACE_EVENT(hfi1_sdma_user_free_queues,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
	TP_ARGS(dd, ctxt, subctxt),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
	),
	TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt
	)
);

TRACE_EVENT(hfi1_sdma_user_process_request,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx),
	TP_ARGS(dd, ctxt, subctxt, comp_idx),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
		__field(u16, comp_idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->comp_idx = comp_idx;
	),
	TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->comp_idx
	)
);

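/*
 * Generic [ctxt:subctxt:comp_idx] -> value template shared by the user SDMA
 * tidoffset/length tracepoints below.
 */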
DECLARE_EVENT_CLASS(
	hfi1_sdma_value_template,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
		u32 value),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
		__field(u16, comp_idx)
		__field(u32, value)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->comp_idx = comp_idx;
		__entry->value = value;
	),
	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->comp_idx,
		__entry->value
	)
);

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx, u32 tidoffset),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx, u32 data_len),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx, u32 data_len),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

TRACE_EVENT(hfi1_sdma_user_tid_info,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
		__field(u16, comp_idx)
		__field(u32, tidoffset)
		__field(u32, units)
		__field(u8, shift)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->comp_idx = comp_idx;
		__entry->tidoffset = tidoffset;
		__entry->units = units;
		__entry->shift = shift;
	),
	TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->comp_idx,
		__entry->tidoffset,
		__entry->units,
		__entry->shift
	)
);

TRACE_EVENT(hfi1_sdma_request,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		unsigned long dim),
	TP_ARGS(dd, ctxt, subctxt, dim),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
		__field(unsigned long, dim)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->dim = dim;
	),
	TP_printk("[%s] SDMA from %u:%u (%lu)",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->dim
	)
);

DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
	TP_PROTO(struct sdma_engine *sde, u64 status),
	TP_ARGS(sde, status),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(u64, status)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->status = status;
		__entry->idx = sde->this_idx;
	),
	TP_printk("[%s] SDE(%u) status %llx",
		__get_str(dev),
		__entry->idx,
		(unsigned long long)__entry->status
	)
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
	TP_PROTO(struct sdma_engine *sde, u64 status),
	TP_ARGS(sde, status)
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
	TP_PROTO(struct sdma_engine *sde, u64 status),
	TP_ARGS(sde, status)
);

DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
	TP_PROTO(struct sdma_engine *sde, int aidx),
	TP_ARGS(sde, aidx),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(int, aidx)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->idx = sde->this_idx;
		__entry->aidx = aidx;
	),
	TP_printk("[%s] SDE(%u) aidx %d",
		__get_str(dev),
		__entry->idx,
		__entry->aidx
	)
);

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
	TP_PROTO(struct sdma_engine *sde, int aidx),
	TP_ARGS(sde, aidx));

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
	TP_PROTO(struct sdma_engine *sde, int aidx),
	TP_ARGS(sde, aidx));

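/*
 * With CONFIG_HFI1_DEBUG_SDMA_ORDER the progress trace also records the
 * txreq sequence number (sn); the default variant omits it.
 */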
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
	TP_PROTO(struct sdma_engine *sde,
		u16 hwhead,
		u16 swhead,
		struct sdma_txreq *txp
	),
	TP_ARGS(sde, hwhead, swhead, txp),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(u64, sn)
		__field(u16, hwhead)
		__field(u16, swhead)
		__field(u16, txnext)
		__field(u16, tx_tail)
		__field(u16, tx_head)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->hwhead = hwhead;
		__entry->swhead = swhead;
		__entry->tx_tail = sde->tx_tail;
		__entry->tx_head = sde->tx_head;
		__entry->txnext = txp ? txp->next_descq_idx : ~0;
		__entry->idx = sde->this_idx;
		__entry->sn = txp ? txp->sn : ~0;
	),
	TP_printk(
		"[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		__get_str(dev),
		__entry->idx,
		__entry->sn,
		__entry->hwhead,
		__entry->swhead,
		__entry->txnext,
		__entry->tx_head,
		__entry->tx_tail
	)
);
#else
TRACE_EVENT(hfi1_sdma_progress,
	TP_PROTO(struct sdma_engine *sde,
		u16 hwhead, u16 swhead,
		struct sdma_txreq *txp
	),
	TP_ARGS(sde, hwhead, swhead, txp),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(u16, hwhead)
		__field(u16, swhead)
		__field(u16, txnext)
		__field(u16, tx_tail)
		__field(u16, tx_head)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->hwhead = hwhead;
		__entry->swhead = swhead;
		__entry->tx_tail = sde->tx_tail;
		__entry->tx_head = sde->tx_head;
		__entry->txnext = txp ? txp->next_descq_idx : ~0;
		__entry->idx = sde->this_idx;
	),
	TP_printk(
		"[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		__get_str(dev),
		__entry->idx,
		__entry->hwhead,
		__entry->swhead,
		__entry->txnext,
		__entry->tx_head,
		__entry->tx_tail
	)
);
#endif

DECLARE_EVENT_CLASS(hfi1_sdma_sn,
	TP_PROTO(struct sdma_engine *sde, u64 sn),
	TP_ARGS(sde, sn),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(u64, sn)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->sn = sn;
		__entry->idx = sde->this_idx;
	),
	TP_printk("[%s] SDE(%u) sn %llu",
		__get_str(dev),
		__entry->idx,
		__entry->sn
	)
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
	TP_PROTO(
		struct sdma_engine *sde,
		u64 sn
	),
	TP_ARGS(sde, sn)
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
	TP_PROTO(struct sdma_engine *sde, u64 sn),
	TP_ARGS(sde, sn)
);

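/* Dump of a user SDMA packet header: PBC, LRH, BTH and KDETH words plus the TID value. */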
#define USDMA_HDR_FORMAT \
	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"

TRACE_EVENT(hfi1_sdma_user_header,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		struct hfi1_pkt_header *hdr, u32 tidval),
	TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u8, subctxt)
		__field(u16, req)
		__field(u32, pbc0)
		__field(u32, pbc1)
		__field(u32, lrh0)
		__field(u32, lrh1)
		__field(u32, bth0)
		__field(u32, bth1)
		__field(u32, bth2)
		__field(u32, kdeth0)
		__field(u32, kdeth1)
		__field(u32, kdeth2)
		__field(u32, kdeth3)
		__field(u32, kdeth4)
		__field(u32, kdeth5)
		__field(u32, kdeth6)
		__field(u32, kdeth7)
		__field(u32, kdeth8)
		__field(u32, tidval)
	),
	TP_fast_assign(
		__le32 *pbc = (__le32 *)hdr->pbc;
		__be32 *lrh = (__be32 *)hdr->lrh;
		__be32 *bth = (__be32 *)hdr->bth;
		__le32 *kdeth = (__le32 *)&hdr->kdeth;

		DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->req = req;
		__entry->pbc0 = le32_to_cpu(pbc[0]);
		__entry->pbc1 = le32_to_cpu(pbc[1]);
		__entry->lrh0 = be32_to_cpu(lrh[0]);
		__entry->lrh1 = be32_to_cpu(lrh[1]);
		__entry->bth0 = be32_to_cpu(bth[0]);
		__entry->bth1 = be32_to_cpu(bth[1]);
		__entry->bth2 = be32_to_cpu(bth[2]);
		__entry->kdeth0 = le32_to_cpu(kdeth[0]);
		__entry->kdeth1 = le32_to_cpu(kdeth[1]);
		__entry->kdeth2 = le32_to_cpu(kdeth[2]);
		__entry->kdeth3 = le32_to_cpu(kdeth[3]);
		__entry->kdeth4 = le32_to_cpu(kdeth[4]);
		__entry->kdeth5 = le32_to_cpu(kdeth[5]);
		__entry->kdeth6 = le32_to_cpu(kdeth[6]);
		__entry->kdeth7 = le32_to_cpu(kdeth[7]);
		__entry->kdeth8 = le32_to_cpu(kdeth[8]);
		__entry->tidval = tidval;
	),
	TP_printk(USDMA_HDR_FORMAT,
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->req,
		__entry->pbc1,
		__entry->pbc0,
		__entry->lrh0,
		__entry->lrh1,
		__entry->bth0,
		__entry->bth1,
		__entry->bth2,
		__entry->kdeth0,
		__entry->kdeth1,
		__entry->kdeth2,
		__entry->kdeth3,
		__entry->kdeth4,
		__entry->kdeth5,
		__entry->kdeth6,
		__entry->kdeth7,
		__entry->kdeth8,
		__entry->tidval
	)
);

#define SDMA_UREQ_FMT \
	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
TRACE_EVENT(hfi1_sdma_user_reqinfo,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
	TP_ARGS(dd, ctxt, subctxt, i),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u8, subctxt)
		__field(u8, ver_opcode)
		__field(u8, iovcnt)
		__field(u16, npkts)
		__field(u16, fragsize)
		__field(u16, comp_idx)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->ver_opcode = i[0] & 0xff;
		__entry->iovcnt = (i[0] >> 8) & 0xff;
		__entry->npkts = i[1];
		__entry->fragsize = i[2];
		__entry->comp_idx = i[3];
	),
	TP_printk(SDMA_UREQ_FMT,
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->ver_opcode,
		__entry->iovcnt,
		__entry->npkts,
		__entry->fragsize,
		__entry->comp_idx
	)
);

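/* Map user SDMA completion states to their symbolic names for TP_printk(). */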
#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st) \
	__print_symbolic(st, \
		usdma_complete_name(FREE), \
		usdma_complete_name(QUEUED), \
		usdma_complete_name(COMPLETE), \
		usdma_complete_name(ERROR))

TRACE_EVENT(hfi1_sdma_user_completion,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
		u8 state, int code),
	TP_ARGS(dd, ctxt, subctxt, idx, state, code),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u8, subctxt)
		__field(u16, idx)
		__field(u8, state)
		__field(int, code)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->idx = idx;
		__entry->state = state;
		__entry->code = code;
	),
	TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
		__get_str(dev), __entry->ctxt, __entry->subctxt,
		__entry->idx, show_usdma_complete_state(__entry->state),
		__entry->code)
);

TRACE_EVENT(hfi1_usdma_defer,
	TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		struct sdma_engine *sde,
		struct iowait *wait),
	TP_ARGS(pq, sde, wait),
	TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
		__field(struct hfi1_user_sdma_pkt_q *, pq)
		__field(struct sdma_engine *, sde)
		__field(struct iowait *, wait)
		__field(int, engine)
		__field(int, empty)
	),
	TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
		__entry->pq = pq;
		__entry->sde = sde;
		__entry->wait = wait;
		__entry->engine = sde->this_idx;
		__entry->empty = list_empty(&__entry->wait->list);
	),
	TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
		__get_str(dev),
		(unsigned long long)__entry->pq,
		(unsigned long long)__entry->sde,
		(unsigned long long)__entry->wait,
		__entry->engine,
		__entry->empty
	)
);

TRACE_EVENT(hfi1_usdma_activate,
	TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		struct iowait *wait,
		int reason),
	TP_ARGS(pq, wait, reason),
	TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
		__field(struct hfi1_user_sdma_pkt_q *, pq)
		__field(struct iowait *, wait)
		__field(int, reason)
	),
	TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
		__entry->pq = pq;
		__entry->wait = wait;
		__entry->reason = reason;
	),
	TP_printk("[%s] pq %llx wait %llx reason %d",
		__get_str(dev),
		(unsigned long long)__entry->pq,
		(unsigned long long)__entry->wait,
		__entry->reason
	)
);

TRACE_EVENT(hfi1_usdma_we,
	TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		int we_ret),
	TP_ARGS(pq, we_ret),
	TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
		__field(struct hfi1_user_sdma_pkt_q *, pq)
		__field(int, state)
		__field(int, we_ret)
	),
	TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
		__entry->pq = pq;
		__entry->state = pq->state;
		__entry->we_ret = we_ret;
	),
	TP_printk("[%s] pq %llx state %d we_ret %d",
		__get_str(dev),
		(unsigned long long)__entry->pq,
		__entry->state,
		__entry->we_ret
	)
);

const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)

TRACE_EVENT(hfi1_sdma_user_header_ahg,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
	TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u8, subctxt)
		__field(u16, req)
		__field(u8, sde)
		__field(u8, idx)
		__field(int, len)
		__field(u32, tidval)
		__array(u32, ahg, 10)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->req = req;
		__entry->sde = sde;
		__entry->idx = ahgidx;
		__entry->len = len;
		__entry->tidval = tidval;
		memcpy(__entry->ahg, ahg, len * sizeof(u32));
	),
	TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->req,
		__entry->sde,
		__entry->idx,
		__entry->len - 1,
		__print_u32_hex(__entry->ahg, __entry->len),
		__entry->tidval
	)
);

TRACE_EVENT(hfi1_sdma_state,
	TP_PROTO(struct sdma_engine *sde,
		const char *cstate,
		const char *nstate
	),
	TP_ARGS(sde, cstate, nstate),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__string(curstate, cstate)
		__string(newstate, nstate)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__assign_str(curstate, cstate);
		__assign_str(newstate, nstate);
	),
	TP_printk("[%s] current state %s new state %s",
		__get_str(dev),
		__get_str(curstate),
		__get_str(newstate)
	)
);

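/*
 * Buffer control table dump; BCT() pulls big-endian u16 fields out of the
 * buffer_control structure copied into the dynamic array.
 */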
#define BCT_FORMAT \
	"shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"

#define BCT(field) \
	be16_to_cpu( \
		((struct buffer_control *)__get_dynamic_array(bct))->field \
	)

DECLARE_EVENT_CLASS(hfi1_bct_template,
	TP_PROTO(struct hfi1_devdata *dd,
		struct buffer_control *bc),
	TP_ARGS(dd, bc),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__dynamic_array(u8, bct, sizeof(*bc))
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		memcpy(__get_dynamic_array(bct), bc,
			sizeof(*bc));
	),
	TP_printk(BCT_FORMAT,
		BCT(overall_shared_limit),

		BCT(vl[0].dedicated),
		BCT(vl[0].shared),

		BCT(vl[1].dedicated),
		BCT(vl[1].shared),

		BCT(vl[2].dedicated),
		BCT(vl[2].shared),

		BCT(vl[3].dedicated),
		BCT(vl[3].shared),

		BCT(vl[4].dedicated),
		BCT(vl[4].shared),

		BCT(vl[5].dedicated),
		BCT(vl[5].shared),

		BCT(vl[6].dedicated),
		BCT(vl[6].shared),

		BCT(vl[7].dedicated),
		BCT(vl[7].shared),

		BCT(vl[15].dedicated),
		BCT(vl[15].shared)
	)
);

DEFINE_EVENT(hfi1_bct_template, bct_set,
	TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	TP_ARGS(dd, bc));

DEFINE_EVENT(hfi1_bct_template, bct_get,
	TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	TP_ARGS(dd, bc));

TRACE_EVENT(
	hfi1_qp_send_completion,
	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
	TP_ARGS(qp, wqe, idx),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(struct rvt_swqe *, wqe)
		__field(u64, wr_id)
		__field(u32, qpn)
		__field(u32, qpt)
		__field(u32, length)
		__field(u32, idx)
		__field(u32, ssn)
		__field(enum ib_wr_opcode, opcode)
		__field(int, send_flags)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->wqe = wqe;
		__entry->wr_id = wqe->wr.wr_id;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->qpt = qp->ibqp.qp_type;
		__entry->length = wqe->length;
		__entry->idx = idx;
		__entry->ssn = wqe->ssn;
		__entry->opcode = wqe->wr.opcode;
		__entry->send_flags = wqe->wr.send_flags;
	),
	TP_printk(
		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
		__get_str(dev),
		__entry->qpn,
		__entry->qpt,
		__entry->wqe,
		__entry->idx,
		__entry->wr_id,
		__entry->length,
		__entry->ssn,
		__entry->opcode,
		__entry->send_flags
	)
);

DECLARE_EVENT_CLASS(
	hfi1_do_send_template,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(bool, flag)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->flag = flag;
	),
	TP_printk(
		"[%s] qpn %x flag %d",
		__get_str(dev),
		__entry->qpn,
		__entry->flag
	)
);

DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_do_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_do_tid_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_expired_time_slice,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

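/*
 * IPoIB transmit queue state: ring head/tail, outstanding txreqs and
 * stop/wake accounting for one txq.
 */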
DECLARE_EVENT_CLASS(
	hfi1_ipoib_txq_template,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(txq->priv->dd)
		__field(struct hfi1_ipoib_txq *, txq)
		__field(struct sdma_engine *, sde)
		__field(ulong, head)
		__field(ulong, tail)
		__field(uint, used)
		__field(uint, flow)
		__field(int, stops)
		__field(int, no_desc)
		__field(u8, idx)
		__field(u8, stopped)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(txq->priv->dd);
		__entry->txq = txq;
		__entry->sde = txq->sde;
		__entry->head = txq->tx_ring.head;
		__entry->tail = txq->tx_ring.tail;
		__entry->idx = txq->q_idx;
		__entry->used =
			txq->tx_ring.sent_txreqs -
			txq->tx_ring.complete_txreqs;
		__entry->flow = txq->flow.as_int;
		__entry->stops = atomic_read(&txq->tx_ring.stops);
		__entry->no_desc = atomic_read(&txq->tx_ring.no_desc);
		__entry->stopped =
			__netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
	),
	TP_printk(
		"[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
		__get_str(dev),
		(unsigned long long)__entry->txq,
		__entry->idx,
		(unsigned long long)__entry->sde,
		__entry->sde ? __entry->sde->this_idx : 0,
		__entry->sde ? __entry->sde->cpu : 0,
		__entry->head,
		__entry->tail,
		__entry->flow,
		__entry->used,
		__entry->stops,
		__entry->no_desc,
		__entry->stopped
	)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_stop,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_wake,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_flow_flush,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_flow_switch,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_full,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_queued,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

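/* Per-txreq IPoIB events: record the txreq, its owning txq and the ring index. */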
DECLARE_EVENT_CLASS(
	hfi1_ipoib_tx_template,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(tx->txq->priv->dd)
		__field(struct ipoib_txreq *, tx)
		__field(struct hfi1_ipoib_txq *, txq)
		__field(struct sk_buff *, skb)
		__field(ulong, idx)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(tx->txq->priv->dd);
		__entry->tx = tx;
		__entry->skb = tx->skb;
		__entry->txq = tx->txq;
		__entry->idx = idx;
	),
	TP_printk(
		"[%s] tx %llx txq %llx,%u skb %llx idx %lu",
		__get_str(dev),
		(unsigned long long)__entry->tx,
		(unsigned long long)__entry->txq,
		__entry->txq ? __entry->txq->q_idx : 0,
		(unsigned long long)__entry->skb,
		__entry->idx
	)
);

DEFINE_EVENT(
	hfi1_ipoib_tx_template, hfi1_tx_produce,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx)
);

DEFINE_EVENT(
	hfi1_ipoib_tx_template, hfi1_tx_consume,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_alloc_tx,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_poll,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(
	hfi1_ipoib_txq_template, hfi1_txq_complete,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

#endif /* __HFI1_TRACE_TX_H */

#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tx
#include <trace/define_trace.h>