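/*
 * debugfs support for mlx5_core: exposes per-device QP, EQ and CQ context
 * fields, command-interface statistics and firmware page counters under
 * the "mlx5" directory in debugfs.
 */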
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"

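/*
 * Field indices for the per-resource debugfs files.  add_res_tree() creates
 * one read-only file per entry of the matching *_fields[] name table below.
 */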
enum {
        QP_PID,
        QP_STATE,
        QP_XPORT,
        QP_MTU,
        QP_N_RECV,
        QP_RECV_SZ,
        QP_N_SEND,
        QP_LOG_PG_SZ,
        QP_RQPN,
};

static char *qp_fields[] = {
        [QP_PID] = "pid",
        [QP_STATE] = "state",
        [QP_XPORT] = "transport",
        [QP_MTU] = "mtu",
        [QP_N_RECV] = "num_recv",
        [QP_RECV_SZ] = "rcv_wqe_sz",
        [QP_N_SEND] = "num_send",
        [QP_LOG_PG_SZ] = "log2_page_sz",
        [QP_RQPN] = "remote_qpn",
};

enum {
        EQ_NUM_EQES,
        EQ_INTR,
        EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
        [EQ_NUM_EQES] = "num_eqes",
        [EQ_INTR] = "intr",
        [EQ_LOG_PG_SZ] = "log_page_size",
};

enum {
        CQ_PID,
        CQ_NUM_CQES,
        CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
        [CQ_PID] = "pid",
        [CQ_NUM_CQES] = "num_cqes",
        [CQ_LOG_PG_SZ] = "log_page_size",
};

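/* Top-level "mlx5" debugfs directory, shared by all mlx5 devices. */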
struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

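/*
 * Called once at driver load/unload.  Errors are deliberately ignored, as is
 * conventional for debugfs: a failed debugfs_create_dir() returns an error
 * pointer that later debugfs calls tolerate.
 */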
void mlx5_register_debugfs(void)
{
        mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}

void mlx5_unregister_debugfs(void)
{
        debugfs_remove(mlx5_debugfs_root);
}

struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
{
        return dev->priv.dbg.dbg_root;
}
EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);

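/*
 * Per-device "QPs", "EQs" and "CQs" directories; each tracked resource gets
 * its own sub-directory beneath them (see mlx5_debug_*_add() below).
 */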
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
        dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);

void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
        dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}

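/*
 * "average" file under each command's statistics directory: reading returns
 * the mean of the accumulated samples (sum / n); writing anything resets
 * both counters to zero.
 */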
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
                            loff_t *pos)
{
        struct mlx5_cmd_stats *stats;
        u64 field = 0;
        int ret;
        char tbuf[22];

        stats = filp->private_data;
        spin_lock_irq(&stats->lock);
        if (stats->n)
                field = div64_u64(stats->sum, stats->n);
        spin_unlock_irq(&stats->lock);
        ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t average_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct mlx5_cmd_stats *stats;

        stats = filp->private_data;
        spin_lock_irq(&stats->lock);
        stats->sum = 0;
        stats->n = 0;
        spin_unlock_irq(&stats->lock);

        *pos += count;

        return count;
}

static const struct file_operations stats_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = average_read,
        .write = average_write,
};

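/*
 * "slots_inuse": number of command slots currently in use, i.e.
 * max_reg_cmds minus the number of free bits left in the bitmask.
 */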
static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *pos)
{
        struct mlx5_cmd *cmd;
        char tbuf[6];
        int weight;
        int field;
        int ret;

        cmd = filp->private_data;
        weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
        field = cmd->max_reg_cmds - weight;
        ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations slots_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = slots_read,
};

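/*
 * Create a "commands" directory with one sub-directory per known command
 * opcode, exposing execution statistics and details of the last failure.
 */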
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_stats *stats;
        struct dentry **cmd;
        const char *namep;
        int i;

        cmd = &dev->priv.dbg.cmdif_debugfs;
        *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);

        debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);

        for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
                stats = &dev->cmd.stats[i];
                namep = mlx5_command_str(i);
                if (strcmp(namep, "unknown command opcode")) {
                        stats->root = debugfs_create_dir(namep, *cmd);

                        debugfs_create_file("average", 0400, stats->root, stats,
                                            &stats_fops);
                        debugfs_create_u64("n", 0400, stats->root, &stats->n);
                        debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
                        debugfs_create_u64("failed_mbox_status", 0400, stats->root,
                                           &stats->failed_mbox_status);
                        debugfs_create_u32("last_failed_errno", 0400, stats->root,
                                           &stats->last_failed_errno);
                        debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
                                          &stats->last_failed_mbox_status);
                        debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
                                           &stats->last_failed_syndrome);
                }
        }
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
}

void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
        dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
}

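/*
 * Expose the driver's firmware page accounting counters as read-only u32
 * files under a per-device "pages" directory.
 */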
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
{
        struct dentry *pages;

        dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
        pages = dev->priv.dbg.pages_debugfs;

        debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
        debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
        debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
        debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
        debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
        debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
                           &dev->priv.reclaim_pages_discard);
}

void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}

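/*
 * The *_read_field() helpers query the resource context from firmware and
 * extract the requested field (a few fields, such as pid, come from the
 * driver's own bookkeeping).  For string-valued QP fields the returned u64
 * carries a pointer to a constant string and *is_str is set.
 */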
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                         int index, int *is_str)
{
        int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
        u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
        u64 param = 0;
        u32 *out;
        int state;
        u32 *qpc;
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return 0;

        MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
        MLX5_SET(query_qp_in, in, qpn, qp->qpn);
        err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
        if (err)
                goto out;

        *is_str = 0;

        qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
        switch (index) {
        case QP_PID:
                param = qp->pid;
                break;
        case QP_STATE:
                state = MLX5_GET(qpc, qpc, state);
                param = (unsigned long)mlx5_qp_state_str(state);
                *is_str = 1;
                break;
        case QP_XPORT:
                param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
                *is_str = 1;
                break;
        case QP_MTU:
                switch (MLX5_GET(qpc, qpc, mtu)) {
                case IB_MTU_256:
                        param = 256;
                        break;
                case IB_MTU_512:
                        param = 512;
                        break;
                case IB_MTU_1024:
                        param = 1024;
                        break;
                case IB_MTU_2048:
                        param = 2048;
                        break;
                case IB_MTU_4096:
                        param = 4096;
                        break;
                default:
                        param = 0;
                }
                break;
        case QP_N_RECV:
                param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
                break;
        case QP_RECV_SZ:
                param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
                break;
        case QP_N_SEND:
                if (!MLX5_GET(qpc, qpc, no_sq))
                        param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
                break;
        case QP_LOG_PG_SZ:
                param = MLX5_GET(qpc, qpc, log_page_size) + 12;
                break;
        case QP_RQPN:
                param = MLX5_GET(qpc, qpc, remote_qpn);
                break;
        }
out:
        kfree(out);
        return param;
}

static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                         int index)
{
        int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
        u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
        u64 param = 0;
        void *ctx;
        u32 *out;
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
        err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
        if (err) {
                mlx5_core_warn(dev, "failed to query eq\n");
                goto out;
        }
        ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

        switch (index) {
        case EQ_NUM_EQES:
                param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
                break;
        case EQ_INTR:
                param = MLX5_GET(eqc, ctx, intr);
                break;
        case EQ_LOG_PG_SZ:
                param = MLX5_GET(eqc, ctx, log_page_size) + 12;
                break;
        }

out:
        kfree(out);
        return param;
}

static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                         int index)
{
        int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
        u64 param = 0;
        void *ctx;
        u32 *out;
        int err;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        err = mlx5_core_query_cq(dev, cq, out);
        if (err) {
                mlx5_core_warn(dev, "failed to query cq\n");
                goto out;
        }
        ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

        switch (index) {
        case CQ_PID:
                param = cq->pid;
                break;
        case CQ_NUM_CQES:
                param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
                break;
        case CQ_LOG_PG_SZ:
                param = MLX5_GET(cqc, ctx, log_page_size);
                break;
        }

out:
        kvfree(out);
        return param;
}

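/*
 * Each field file's private_data points at its mlx5_field_desc inside the
 * flexible fields[] array at the end of struct mlx5_rsc_debug.  desc->i is
 * the field's index, so stepping back desc->i descriptors reaches fields[0],
 * and stepping back sizeof(*d) more bytes recovers the containing struct.
 */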
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
                        loff_t *pos)
{
        struct mlx5_field_desc *desc;
        struct mlx5_rsc_debug *d;
        char tbuf[18];
        int is_str = 0;
        u64 field;
        int ret;

        desc = filp->private_data;
        d = (void *)(desc - desc->i) - sizeof(*d);
        switch (d->type) {
        case MLX5_DBG_RSC_QP:
                field = qp_read_field(d->dev, d->object, desc->i, &is_str);
                break;

        case MLX5_DBG_RSC_EQ:
                field = eq_read_field(d->dev, d->object, desc->i);
                break;

        case MLX5_DBG_RSC_CQ:
                field = cq_read_field(d->dev, d->object, desc->i);
                break;

        default:
                mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
                return -EINVAL;
        }

        if (is_str)
                ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
        else
                ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = dbg_read,
};

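/*
 * Create a directory named after the resource number (in hex) containing
 * one read-only file per entry of the field[] name table.
 */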
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
                        struct dentry *root, struct mlx5_rsc_debug **dbg,
                        int rsn, char **field, int nfile, void *data)
{
        struct mlx5_rsc_debug *d;
        char resn[32];
        int i;

        d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->dev = dev;
        d->object = data;
        d->type = type;
        sprintf(resn, "0x%x", rsn);
        d->root = debugfs_create_dir(resn, root);

        for (i = 0; i < nfile; i++) {
                d->fields[i].i = i;
                debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
                                    &fops);
        }
        *dbg = d;

        return 0;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
        debugfs_remove_recursive(d->root);
        kfree(d);
}

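/*
 * Per-resource add/remove entry points.  They do nothing when the top-level
 * mlx5 debugfs directory was not created.
 */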
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
                           &qp->dbg, qp->qpn, qp_fields,
                           ARRAY_SIZE(qp_fields), qp);
        if (err)
                qp->dbg = NULL;

        return err;
}
EXPORT_SYMBOL(mlx5_debug_qp_add);

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
        if (!mlx5_debugfs_root)
                return;

        if (qp->dbg)
                rem_res_tree(qp->dbg);
}
EXPORT_SYMBOL(mlx5_debug_qp_remove);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
                           &eq->dbg, eq->eqn, eq_fields,
                           ARRAY_SIZE(eq_fields), eq);
        if (err)
                eq->dbg = NULL;

        return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        if (!mlx5_debugfs_root)
                return;

        if (eq->dbg)
                rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
                           &cq->dbg, cq->cqn, cq_fields,
                           ARRAY_SIZE(cq_fields), cq);
        if (err)
                cq->dbg = NULL;

        return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        if (!mlx5_debugfs_root)
                return;

        if (cq->dbg) {
                rem_res_tree(cq->dbg);
                cq->dbg = NULL;
        }
}