// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2019-2020, Mellanox Technologies Ltd. All rights reserved.
 */

#include <uapi/rdma/rdma_netlink.h>
#include <linux/mlx5/rsc_dump.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/restrack.h>
#include "mlx5_ib.h"
#include "restrack.h"

#define MAX_DUMP_SIZE 1024

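/*
 * dump_rsc() - copy the firmware dump of a single resource into a
 * caller-supplied buffer.
 *
 * Builds a resource-dump key for the requested segment @type and object
 * @index, then iterates mlx5_rsc_dump_next() through a scratch page,
 * appending each chunk to @data until the firmware reports completion.
 * The loop bails out on a firmware error or if the accumulated dump
 * would exceed MAX_DUMP_SIZE; on success the total length is returned
 * through @data_len.
 */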
static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
            int index, void *data, int *data_len)
{
    struct mlx5_core_dev *mdev = dev;
    struct mlx5_rsc_dump_cmd *cmd;
    struct mlx5_rsc_key key = {};
    struct page *page;
    int offset = 0;
    int err = 0;
    int cmd_err;
    int size;

    page = alloc_page(GFP_KERNEL);
    if (!page)
        return -ENOMEM;

    key.size = PAGE_SIZE;
    key.rsc = type;
    key.index1 = index;
    key.num_of_obj1 = 1;

    cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
    if (IS_ERR(cmd)) {
        err = PTR_ERR(cmd);
        goto free_page;
    }

    do {
        cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
        if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
            err = cmd_err;
            goto destroy_cmd;
        }
        memcpy(data + offset, page_address(page), size);
        offset += size;
    } while (cmd_err > 0);
    *data_len = offset;

destroy_cmd:
    mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
    __free_page(page);
    return err;
}

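/*
 * fill_res_raw() - dump one firmware object and attach the result to the
 * netlink message as the RDMA_NLDEV_ATTR_RES_RAW attribute.
 */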
static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
            enum mlx5_sgmt_type type, u32 key)
{
    int len = 0;
    void *data;
    int err;

    data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    err = dump_rsc(dev->mdev, type, key, data, &len);
    if (err)
        goto out;

    err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
out:
    kfree(data);
    return err;
}

0080 
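/*
 * fill_stat_mr_entry() - emit per-MR on-demand paging (ODP) statistics
 * as a nested RDMA_NLDEV_ATTR_STAT_HWCOUNTERS table. MRs without
 * IB_ACCESS_ON_DEMAND carry no such counters and are skipped.
 */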
static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
    struct mlx5_ib_mr *mr = to_mmr(ibmr);
    struct nlattr *table_attr;

    if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
        return 0;

    table_attr = nla_nest_start(msg,
                    RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);

    if (!table_attr)
        goto err;

    if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
                     atomic64_read(&mr->odp_stats.faults)))
        goto err_table;
    if (rdma_nl_stat_hwcounter_entry(
            msg, "page_invalidations",
            atomic64_read(&mr->odp_stats.invalidations)))
        goto err_table;
    if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
                     atomic64_read(&mr->odp_stats.prefetch)))
        goto err_table;

    nla_nest_end(msg, table_attr);
    return 0;

err_table:
    nla_nest_cancel(msg, table_attr);
err:
    return -EMSGSIZE;
}

0114 
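/*
 * fill_res_mr_entry_raw() - dump the PRM MKEY context backing this MR;
 * the mkey index is derived from the full key before querying firmware.
 */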
static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
{
    struct mlx5_ib_mr *mr = to_mmr(ibmr);

    return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
                mlx5_mkey_to_idx(mr->mmkey.key));
}

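/*
 * fill_res_mr_entry() - add driver-specific details for an ODP MR under
 * RDMA_NLDEV_ATTR_DRIVER, namely whether the ODP mapping is implicit or
 * explicit.
 */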
static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
    struct mlx5_ib_mr *mr = to_mmr(ibmr);
    struct nlattr *table_attr;

    if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
        return 0;

    table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
    if (!table_attr)
        goto err;

    if (mr->is_odp_implicit) {
        if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
            goto err;
    } else {
        if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
            goto err;
    }

    nla_nest_end(msg, table_attr);
    return 0;

err:
    nla_nest_cancel(msg, table_attr);
    return -EMSGSIZE;
}

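/*
 * fill_res_cq_entry_raw() - dump the PRM CQ context, keyed by CQ number.
 */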
static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
{
    struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
    struct mlx5_ib_cq *cq = to_mcq(ibcq);

    return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
}

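/*
 * fill_res_qp_entry_raw() - dump the PRM QP context, keyed by QP number.
 */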
static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
    struct mlx5_ib_dev *dev = to_mdev(ibqp->device);

    return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
                ibqp->qp_num);
}

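/*
 * Callbacks invoked by the RDMA restrack/netlink core when userspace
 * queries this device's resources (e.g. via the iproute2 "rdma" tool).
 */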
static const struct ib_device_ops restrack_ops = {
    .fill_res_cq_entry_raw = fill_res_cq_entry_raw,
    .fill_res_mr_entry = fill_res_mr_entry,
    .fill_res_mr_entry_raw = fill_res_mr_entry_raw,
    .fill_res_qp_entry_raw = fill_res_qp_entry_raw,
    .fill_stat_mr_entry = fill_stat_mr_entry,
};

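/*
 * mlx5_ib_restrack_init() - hook the restrack callbacks into the
 * ib_device ops; called once during device initialization.
 */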
int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev)
{
    ib_set_device_ops(&dev->ib_dev, &restrack_ops);
    return 0;
}