Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
0002 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
0003 
0004 #include <linux/debugfs.h>
0005 #include "eswitch.h"
0006 
/* Diagnostic counters exposed under each vport's "vnic_diag" debugfs dir.
 * Each value selects one field of the firmware's vnic_diagnostic_statistics
 * context (see mlx5_esw_query_vnic_diag()).
 *
 * NOTE(review): "QOUTA" looks like a typo for "QUOTA"; it maps to the
 * quota_exceeded_command field.  Renaming would touch every user of the
 * constant, so it is left as-is here.
 */
enum vnic_diag_counter {
	MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE,
	MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW,
	MLX5_VNIC_DIAG_COMP_EQ_OVERRUN,
	MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN,
	MLX5_VNIC_DIAG_CQ_OVERRUN,
	MLX5_VNIC_DIAG_INVALID_COMMAND,
	MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND,
};
0016 
/* Read one firmware vNIC diagnostic counter for @vport.
 *
 * Issues a QUERY_VNIC_ENV command against @vport and copies the field
 * selected by @counter into @val.  When @vport is not the eswitch manager
 * vport, the other_vport flag is set so the manager queries on the vport's
 * behalf.
 *
 * Return: 0 on success, or the negative errno from mlx5_cmd_exec().
 */
static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
				    u32 *val)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *dev = vport->dev;
	u16 vport_num = vport->vport;
	void *vnic_diag_out;
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
	/* Querying a vport other than our own requires the other_vport flag */
	if (!mlx5_esw_is_manager_vport(dev->priv.eswitch, vport_num))
		MLX5_SET(query_vnic_env_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* vport_env is the vnic_diagnostic_statistics sub-struct of the reply */
	vnic_diag_out = MLX5_ADDR_OF(query_vnic_env_out, out, vport_env);
	switch (counter) {
	case MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, total_error_queues);
		break;
	case MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out,
				send_queue_priority_update_flow);
		break;
	case MLX5_VNIC_DIAG_COMP_EQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, comp_eq_overrun);
		break;
	case MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, async_eq_overrun);
		break;
	case MLX5_VNIC_DIAG_CQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, cq_overrun);
		break;
	case MLX5_VNIC_DIAG_INVALID_COMMAND:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, invalid_command);
		break;
	case MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
		break;
	}

	return 0;
}
0064 
0065 static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
0066                 enum vnic_diag_counter type)
0067 {
0068     u32 val = 0;
0069     int ret;
0070 
0071     ret = mlx5_esw_query_vnic_diag(vport, type, &val);
0072     if (ret)
0073         return ret;
0074 
0075     seq_printf(file, "%d\n", val);
0076     return 0;
0077 }
0078 
0079 static int total_q_under_processor_handle_show(struct seq_file *file, void *priv)
0080 {
0081     return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE);
0082 }
0083 
0084 static int send_queue_priority_update_flow_show(struct seq_file *file, void *priv)
0085 {
0086     return __show_vnic_diag(file, file->private,
0087                 MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW);
0088 }
0089 
0090 static int comp_eq_overrun_show(struct seq_file *file, void *priv)
0091 {
0092     return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_COMP_EQ_OVERRUN);
0093 }
0094 
0095 static int async_eq_overrun_show(struct seq_file *file, void *priv)
0096 {
0097     return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN);
0098 }
0099 
0100 static int cq_overrun_show(struct seq_file *file, void *priv)
0101 {
0102     return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_CQ_OVERRUN);
0103 }
0104 
0105 static int invalid_command_show(struct seq_file *file, void *priv)
0106 {
0107     return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_INVALID_COMMAND);
0108 }
0109 
0110 static int quota_exceeded_command_show(struct seq_file *file, void *priv)
0111 {
0112     return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND);
0113 }
0114 
/* Each invocation generates <name>_fops (single_open-based open/read/release)
 * wired to the matching <name>_show() above, for use with debugfs_create_file().
 */
DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(cq_overrun);
DEFINE_SHOW_ATTRIBUTE(invalid_command);
DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
0122 
0123 void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
0124 {
0125     struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
0126 
0127     debugfs_remove_recursive(vport->dbgfs);
0128     vport->dbgfs = NULL;
0129 }
0130 
0131 /* vnic diag dir name is "pf", "ecpf" or "{vf/sf}_xxxx" */
0132 #define VNIC_DIAG_DIR_NAME_MAX_LEN 8
0133 
0134 void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num)
0135 {
0136     struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
0137     struct dentry *vnic_diag;
0138     char dir_name[VNIC_DIAG_DIR_NAME_MAX_LEN];
0139     int err;
0140 
0141     if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
0142         return;
0143 
0144     if (vport_num == MLX5_VPORT_PF) {
0145         strcpy(dir_name, "pf");
0146     } else if (vport_num == MLX5_VPORT_ECPF) {
0147         strcpy(dir_name, "ecpf");
0148     } else {
0149         err = snprintf(dir_name, VNIC_DIAG_DIR_NAME_MAX_LEN, "%s_%d", is_sf ? "sf" : "vf",
0150                    is_sf ? sf_num : vport_num - MLX5_VPORT_FIRST_VF);
0151         if (WARN_ON(err < 0))
0152             return;
0153     }
0154 
0155     vport->dbgfs = debugfs_create_dir(dir_name, esw->dbgfs);
0156     vnic_diag = debugfs_create_dir("vnic_diag", vport->dbgfs);
0157 
0158     if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {
0159         debugfs_create_file("total_q_under_processor_handle", 0444, vnic_diag, vport,
0160                     &total_q_under_processor_handle_fops);
0161         debugfs_create_file("send_queue_priority_update_flow", 0444, vnic_diag, vport,
0162                     &send_queue_priority_update_flow_fops);
0163     }
0164 
0165     if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {
0166         debugfs_create_file("comp_eq_overrun", 0444, vnic_diag, vport,
0167                     &comp_eq_overrun_fops);
0168         debugfs_create_file("async_eq_overrun", 0444, vnic_diag, vport,
0169                     &async_eq_overrun_fops);
0170     }
0171 
0172     if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))
0173         debugfs_create_file("cq_overrun", 0444, vnic_diag, vport, &cq_overrun_fops);
0174 
0175     if (MLX5_CAP_GEN(esw->dev, invalid_command_count))
0176         debugfs_create_file("invalid_command", 0444, vnic_diag, vport,
0177                     &invalid_command_fops);
0178 
0179     if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
0180         debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
0181                     &quota_exceeded_command_fops);
0182 }