// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/iova.h>
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"
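
/* Allocate a protection domain, owned by @uid, via the ALLOC_PD command. */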
static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	MLX5_SET(alloc_pd_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);

	return err;
}

static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
	struct mlx5_core_dev *mdev = dev->mdev;

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
}
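
/* Query the device for its special null mkey via QUERY_SPECIAL_CONTEXTS. */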
static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
	return err;
}
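
/*
 * Create a user context (UCTX) so subsequent commands can be issued with a
 * non-zero uid. If the device allows uid 0 for umem operations (umem_uid_0),
 * no user context is needed and the uid stays 0.
 */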
static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
{
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	int inlen;
	void *in;
	int err;

	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
		return 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);

	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	kfree(in);
	if (!err)
		*uid = MLX5_GET(create_uctx_out, out, uid);

	return err;
}
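
/* Destroy the user context; a zero uid means none was created. */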
static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
{
	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};

	if (!uid)
		return;

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
}
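
/* Create a transport interface send (TIS) object from a caller-built command buffer. */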
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
{
	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
	int err;

	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
	if (!err)
		*tisn = MLX5_GET(create_tis_out, out, tisn);

	return err;
}

void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
}
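
/* Create a receive queue table (RQT) from a caller-built command of @inlen bytes. */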
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
{
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
	int err;

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	if (!err)
		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);

	return err;
}
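
/* Modify an existing RQT, e.g. to point it at a different set of receive queues. */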
int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
{
	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {};

	MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
}
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
}
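
/* Create a transport interface receive (TIR) object from a caller-built command buffer. */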
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
{
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
	int err;

	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
	if (!err)
		*tirn = MLX5_GET(create_tir_out, out, tirn);

	return err;
}

void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
}
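
/* Allocate the transport domain that TIS/TIR objects are created under. */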
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);

	err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);

	return err;
}

void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
}
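
/*
 * Create a memory key from a caller-built command buffer. Only the index part
 * of @mkey is filled in here; any variant bits already set by the caller are
 * preserved (note the OR into *mkey).
 */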
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
			  int inlen)
{
	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
	u32 mkey_index;
	int err;

	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);

	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
	if (err)
		return err;

	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
	*mkey |= mlx5_idx_to_mkey(mkey_index);
	return 0;
}

int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey)
{
	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};

	MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
	return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
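
/* Set up the IOTLB that vringh uses to translate control virtqueue addresses. */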
static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
	mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
	if (!mvdev->cvq.iotlb)
		return -ENOMEM;

	vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);

	return 0;
}

static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
	vhost_iotlb_free(mvdev->cvq.iotlb);
}
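
/*
 * Allocate the device-wide resources: UAR page, user context, protection
 * domain, null mkey, doorbell (kick) mapping and the control VQ IOTLB.
 * On failure, everything allocated so far is released in reverse order.
 */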
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
	u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
	struct mlx5_vdpa_resources *res = &mvdev->res;
	struct mlx5_core_dev *mdev = mvdev->mdev;
	u64 kick_addr;
	int err;

	if (res->valid) {
		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
		return -EINVAL;
	}
	mutex_init(&mvdev->mr.mkey_mtx);
	res->uar = mlx5_get_uars_page(mdev);
	if (IS_ERR(res->uar)) {
		err = PTR_ERR(res->uar);
		goto err_uars;
	}

	err = create_uctx(mvdev, &res->uid);
	if (err)
		goto err_uctx;

	err = alloc_pd(mvdev, &res->pdn, res->uid);
	if (err)
		goto err_pd;

	err = get_null_mkey(mvdev, &res->null_mkey);
	if (err)
		goto err_key;

	kick_addr = mdev->bar_addr + offset;
	res->phys_kick_addr = kick_addr;

	res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
	if (!res->kick_addr) {
		err = -ENOMEM;
		goto err_key;
	}

	err = init_ctrl_vq(mvdev);
	if (err)
		goto err_ctrl;

	res->valid = true;

	return 0;

err_ctrl:
	iounmap(res->kick_addr);
err_key:
	dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
	destroy_uctx(mvdev, res->uid);
err_uctx:
	mlx5_put_uars_page(mdev, res->uar);
err_uars:
	mutex_destroy(&mvdev->mr.mkey_mtx);
	return err;
}
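
/* Release everything set up by mlx5_vdpa_alloc_resources() in reverse order. */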
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_resources *res = &mvdev->res;

	if (!res->valid)
		return;

	cleanup_ctrl_vq(mvdev);
	iounmap(res->kick_addr);
	res->kick_addr = NULL;
	dealloc_pd(mvdev, res->pdn, res->uid);
	destroy_uctx(mvdev, res->uid);
	mlx5_put_uars_page(mvdev->mdev, res->uar);
	mutex_destroy(&mvdev->mr.mkey_mtx);
	res->valid = false;
}