// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_std_types.h>
#include "dm.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

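/*
 * Allocate device memory (MEMIC) out of the device BAR: scan the
 * memic_alloc_pages bitmap for a free range under dm->lock, reserve it,
 * then ask firmware via ALLOC_MEMIC. On -EAGAIN the reservation is
 * dropped and the scan resumes from the next page.
 */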
static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
				u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* mlx5 device sets alignment as 64*2^driver_value
	 * so normalizing is needed.
	 */
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

	while (page_idx < num_memic_hw_pages) {
		spin_lock(&dm->lock);
		page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		if (page_idx < num_memic_hw_pages)
			bitmap_set(dm->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&dm->lock);

		if (page_idx >= num_memic_hw_pages)
			break;

		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
		if (ret) {
			spin_lock(&dm->lock);
			bitmap_clear(dm->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&dm->lock);

			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		*addr = dev->bar_addr +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}

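/*
 * Free a MEMIC allocation: issue DEALLOC_MEMIC and, only on success,
 * clear the corresponding pages in the allocation bitmap.
 */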
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
			    u64 length)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
	u64 start_page_idx;
	int err;

	addr -= dev->bar_addr;
	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
	MLX5_SET(dealloc_memic_in, in, memic_size, length);

	err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
	if (err)
		return;

	spin_lock(&dm->lock);
	bitmap_clear(dm->memic_alloc_pages,
		     start_page_idx, num_pages);
	spin_unlock(&dm->lock);
}

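/* Release a MEMIC operation address via MODIFY_MEMIC (dealloc op_mod). */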
void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
			       u8 operation)
{
	u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
	struct mlx5_core_dev *dev = dm->dev;

	MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
	MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
	MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
	MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

	mlx5_cmd_exec_in(dev, modify_memic, in);
}

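/*
 * Request a device address for performing the given MEMIC operation on
 * an existing allocation; firmware returns a BAR-relative address which
 * is converted to an absolute one.
 */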
static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
				   u8 operation, phys_addr_t *op_addr)
{
	u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
	struct mlx5_core_dev *dev = dm->dev;
	int err;

	MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
	MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
	MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
	MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

	err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
	if (err)
		return err;

	*op_addr = dev->bar_addr +
		   MLX5_GET64(modify_memic_out, out, memic_operation_addr);
	return 0;
}

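/*
 * Register an mmap entry for a device address. Entries are placed in
 * the page-offset range reserved for MLX5_IB_MMAP_DEVICE_MEM so that
 * mmap() can look them up by pgoff.
 */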
static int add_dm_mmap_entry(struct ib_ucontext *context,
			     struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
			     size_t size, u64 address)
{
	mentry->mmap_flag = mmap_flag;
	mentry->address = address;

	return rdma_user_mmap_entry_insert_range(
		context, &mentry->rdma_entry, size,
		MLX5_IB_MMAP_DEVICE_MEM << 16,
		(MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
}

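/* kref release callback: returns the MEMIC range to the device and frees dm. */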
static void mlx5_ib_dm_memic_free(struct kref *kref)
{
	struct mlx5_ib_dm_memic *dm =
		container_of(kref, struct mlx5_ib_dm_memic, ref);
	struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);

	mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
	kfree(dm);
}

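/* Report an op entry's mmap page index and in-page offset to userspace. */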
static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
			   struct uverbs_attr_bundle *attrs)
{
	u64 start_offset;
	u16 page_idx;
	int err;

	page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
	start_offset = op_entry->op_addr & ~PAGE_MASK;
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
			     &page_idx, sizeof(page_idx));
	if (err)
		return err;

	return uverbs_copy_to(attrs,
			      MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
			      &start_offset, sizeof(start_offset));
}

static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
			   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm_op_entry *op_entry;

	op_entry = xa_load(&dm->ops, op);
	if (!op_entry)
		return -ENOENT;

	return copy_op_to_user(op_entry, attrs);
}

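/*
 * MLX5_IB_METHOD_DM_MAP_OP_ADDR handler: map an operation address for a
 * MEMIC region. An existing entry for the same op is reused; otherwise
 * the op address is allocated in firmware, given an mmap entry and
 * cached in dm->ops.
 */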
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
	struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
	struct ib_dm *ibdm = uobj->object;
	struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
	struct mlx5_ib_dm_op_entry *op_entry;
	int err;
	u8 op;

	err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
	if (err)
		return err;

	if (op >= BITS_PER_TYPE(u32))
		return -EOPNOTSUPP;

	if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
		return -EOPNOTSUPP;

	mutex_lock(&dm->ops_xa_lock);
	err = map_existing_op(dm, op, attrs);
	if (!err || err != -ENOENT)
		goto err_unlock;

	op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
	if (!op_entry) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
				      &op_entry->op_addr);
	if (err) {
		kfree(op_entry);
		goto err_unlock;
	}
	op_entry->op = op;
	op_entry->dm = dm;

	err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
				MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
				op_entry->op_addr & PAGE_MASK);
	if (err) {
		mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
		kfree(op_entry);
		goto err_unlock;
	}
	/* From this point, entry will be freed by mmap_free */
	kref_get(&dm->ref);

	err = copy_op_to_user(op_entry, attrs);
	if (err)
		goto err_remove;

	err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
	if (err)
		goto err_remove;
	mutex_unlock(&dm->ops_xa_lock);

	return 0;

err_remove:
	rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
err_unlock:
	mutex_unlock(&dm->ops_xa_lock);

	return err;
}

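/*
 * Allocate a MEMIC region on behalf of userspace and register an mmap
 * entry for it; the returned page index and start offset let userspace
 * mmap() the region.
 */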
static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
					   struct ib_dm_alloc_attr *attr,
					   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
	struct mlx5_ib_dm_memic *dm;
	u64 start_offset;
	u16 page_idx;
	int err;
	u64 address;

	if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
		return ERR_PTR(-EOPNOTSUPP);

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
	dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
	dm->base.ibdm.device = ctx->device;

	kref_init(&dm->ref);
	xa_init(&dm->ops);
	mutex_init(&dm->ops_xa_lock);
	dm->req_length = attr->length;

	err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
				   dm->base.size, attr->alignment);
	if (err) {
		kfree(dm);
		return ERR_PTR(err);
	}

	address = dm->base.dev_addr & PAGE_MASK;
	err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
				dm->base.size, address);
	if (err) {
		mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
		kfree(dm);
		return ERR_PTR(err);
	}

	page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
			     &page_idx, sizeof(page_idx));
	if (err)
		goto err_copy;

	start_offset = dm->base.dev_addr & ~PAGE_MASK;
	err = uverbs_copy_to(attrs,
			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &start_offset, sizeof(start_offset));
	if (err)
		goto err_copy;

	return &dm->base.ibdm;

err_copy:
	rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
	return ERR_PTR(err);
}

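/* Translate a uverbs DM type into the core SW ICM type. */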
static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
{
	switch (uapi_type) {
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		return MLX5_SW_ICM_TYPE_HEADER_MODIFY;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		return MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	default:
		return MLX5_SW_ICM_TYPE_STEERING;
	}
}

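/*
 * Allocate SW ICM device memory (steering or header-modify). Requires
 * CAP_SYS_RAWIO and CAP_NET_RAW plus the matching sw_owner capability.
 */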
static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
					    struct ib_dm_alloc_attr *attr,
					    struct uverbs_attr_bundle *attrs,
					    int type)
{
	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
	enum mlx5_sw_icm_type icm_type;
	struct mlx5_ib_dm_icm *dm;
	u64 act_size;
	int err;

	if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW))
		return ERR_PTR(-EPERM);

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
		      MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
		      MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
		      MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2)))
			return ERR_PTR(-EOPNOTSUPP);
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		if (!MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
		    !MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))
			return ERR_PTR(-EOPNOTSUPP);
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	dm->base.type = type;
	dm->base.ibdm.device = ctx->device;

	/* Allocation size must be a multiple of the basic block size
	 * and a power of 2.
	 */
	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	act_size = roundup_pow_of_two(act_size);

	dm->base.size = act_size;
	icm_type = get_icm_type(type);

	err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
				   to_mucontext(ctx)->devx_uid,
				   &dm->base.dev_addr, &dm->obj_id);
	if (err)
		goto free;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &dm->base.dev_addr, sizeof(dm->base.dev_addr));
	if (err) {
		mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
				       to_mucontext(ctx)->devx_uid,
				       dm->base.dev_addr, dm->obj_id);
		goto free;
	}
	return &dm->base.ibdm;
free:
	kfree(dm);
	return ERR_PTR(err);
}

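/* ib_device_ops alloc_dm entry point: dispatch by the requested DM type. */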
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs)
{
	enum mlx5_ib_uapi_dm_type type;
	int err;

	err = uverbs_get_const_default(&type, attrs,
				       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
				       MLX5_IB_UAPI_DM_TYPE_MEMIC);
	if (err)
		return ERR_PTR(err);

	mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
		    type, attr->length, attr->alignment);

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		return handle_alloc_dm_memic(context, attr, attrs);
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		return handle_alloc_dm_sw_icm(context, attr, attrs, type);
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

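/*
 * Drop every cached op entry. Removing the mmap entries eventually
 * invokes mlx5_ib_dm_mmap_free(), which releases the firmware op and
 * puts the reference each op holds on the dm.
 */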
static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
{
	struct mlx5_ib_dm_op_entry *entry;
	unsigned long idx;

	mutex_lock(&dm->ops_xa_lock);
	xa_for_each(&dm->ops, idx, entry) {
		xa_erase(&dm->ops, idx);
		rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
	}
	mutex_unlock(&dm->ops_xa_lock);
}

static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
{
	dm_memic_remove_ops(dm);
	rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
}

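/* Free SW ICM in firmware; dm itself is freed only when that succeeds. */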
static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
			       struct mlx5_ib_dm_icm *dm)
{
	enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
	struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
	int err;

	err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
				     dm->base.dev_addr, dm->obj_id);
	if (!err)
		kfree(dm);
	return err;
}

static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
			      struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dm *dm = to_mdm(ibdm);

	switch (dm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		mlx5_dm_memic_dealloc(to_memic(ibdm));
		return 0;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
	default:
		return -EOPNOTSUPP;
	}
}

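/*
 * MLX5_IB_METHOD_DM_QUERY handler: report the mmap page index, start
 * offset and originally requested length of a MEMIC allocation.
 */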
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_dm *ibdm =
		uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
	struct mlx5_ib_dm *dm = to_mdm(ibdm);
	struct mlx5_ib_dm_memic *memic;
	u64 start_offset;
	u16 page_idx;
	int err;

	if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
		return -EOPNOTSUPP;

	memic = to_memic(ibdm);
	page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
			     &page_idx, sizeof(page_idx));
	if (err)
		return err;

	start_offset = memic->base.dev_addr & ~PAGE_MASK;
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
			     &start_offset, sizeof(start_offset));
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
			      &memic->req_length,
			      sizeof(memic->req_length));
}

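/*
 * Called when the last reference on a DM mmap entry goes away: put the
 * MEMIC region's kref, or tear down the MEMIC operation it backs.
 */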
void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
			  struct mlx5_user_mmap_entry *mentry)
{
	struct mlx5_ib_dm_op_entry *op_entry;
	struct mlx5_ib_dm_memic *mdm;

	switch (mentry->mmap_flag) {
	case MLX5_IB_MMAP_TYPE_MEMIC:
		mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
		kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
		break;
	case MLX5_IB_MMAP_TYPE_MEMIC_OP:
		op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
					mentry);
		mdm = op_entry->dm;
		mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
					  op_entry->op);
		kfree(op_entry);
		kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
		break;
	default:
		WARN_ON(true);
	}
}

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DM_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
			UVERBS_ACCESS_READ, UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
			     enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DM_MAP_OP_ADDR,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
			UVERBS_OBJECT_DM,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
			   UVERBS_ATTR_TYPE(u8),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16),
			    UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));

const struct uapi_definition mlx5_ib_dm_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
	{},
};

const struct ib_device_ops mlx5_ib_dev_dm_ops = {
	.alloc_dm = mlx5_ib_alloc_dm,
	.dealloc_dm = mlx5_ib_dealloc_dm,
	.reg_dm_mr = mlx5_ib_reg_dm_mr,
};