0001
0002
0003
0004
0005
0006 #include <rdma/uverbs_ioctl.h>
0007 #include <rdma/mlx5_user_ioctl_cmds.h>
0008 #include <rdma/mlx5_user_ioctl_verbs.h>
0009 #include <linux/mlx5/driver.h>
0010 #include "mlx5_ib.h"
0011
0012 #define UVERBS_MODULE_NAME mlx5_ib
0013 #include <rdma/uverbs_named_ioctl.h>
0014
0015 static bool pp_is_supported(struct ib_device *device)
0016 {
0017 struct mlx5_ib_dev *dev = to_mdev(device);
0018
0019 return (MLX5_CAP_GEN(dev->mdev, qos) &&
0020 MLX5_CAP_QOS(dev->mdev, packet_pacing) &&
0021 MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));
0022 }
0023
/*
 * Allocate a packet pacing (rate limit) entry on behalf of a DEVX
 * context and return its hardware index to userspace.
 *
 * Flow: copy the caller-supplied set_pp_rate_limit_context bytes into a
 * zeroed full-size buffer, then ask the core driver for a rate-limit
 * index — either a dedicated one owned by this context's devx UID or a
 * shared one, depending on the DEDICATED_INDEX flag.
 *
 * Returns 0 on success or a negative errno.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	/* Full-size, zero-initialized HW context; the user may supply a
	 * shorter (forward-compatible) blob which is copied over the front.
	 */
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
		MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_pp *pp_entry;
	void *in_ctx;
	u16 uid;
	int inlen;
	u32 flags;
	int err;

	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* The allocated entry can be used only by a DEVX-enabled context */
	if (!c->devx_uid)
		return -EINVAL;

	dev = to_mdev(c->ibucontext.device);
	pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL);
	if (!pp_entry)
		return -ENOMEM;

	/* inlen is bounded by the attr spec (UVERBS_ATTR_SIZE caps it at
	 * sizeof(rl_raw)), so this memcpy cannot overflow the buffer.
	 */
	in_ctx = uverbs_attr_get_alloced_ptr(attrs,
					     MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
	inlen = uverbs_attr_get_len(attrs,
				    MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
	memcpy(rl_raw, in_ctx, inlen);
	err = uverbs_get_flags32(&flags, attrs,
				 MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
				 MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (err)
		goto err;

	/* A dedicated index is owned by this context's UID; a shared
	 * index belongs to the driver-wide shared resource UID.
	 */
	uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ?
		c->devx_uid : MLX5_SHARED_RESOURCE_UID;

	err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid,
		(flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX),
		&pp_entry->index);
	if (err)
		goto err;

	pp_entry->mdev = dev->mdev;
	uobj->object = pp_entry;
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);

	/* After finalize, the uobject owns pp_entry: on a copy failure
	 * the core destroys the uobject and pp_obj_cleanup() releases the
	 * rate and frees pp_entry — so no kfree on this path.
	 */
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
			     &pp_entry->index, sizeof(pp_entry->index));
	return err;

err:
	kfree(pp_entry);
	return err;
}
0084
0085 static int pp_obj_cleanup(struct ib_uobject *uobject,
0086 enum rdma_remove_reason why,
0087 struct uverbs_attr_bundle *attrs)
0088 {
0089 struct mlx5_ib_pp *pp_entry = uobject->object;
0090
0091 mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index);
0092 kfree(pp_entry);
0093 return 0;
0094 }
0095
/*
 * UAPI description of the PP alloc method: a new uobject handle, the
 * user's rate-limit context blob (1 byte up to the full HW context,
 * copied into kernel memory by the core), the alloc flags, and the
 * returned u16 rate-limit index.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_PP_OBJ_ALLOC,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE,
			MLX5_IB_OBJECT_PP,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
		UVERBS_ATTR_SIZE(1,
				 MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
			     enum mlx5_ib_uapi_pp_alloc_flags,
			     UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
			    UVERBS_ATTR_TYPE(u16),
			    UA_MANDATORY));
0114
/* UAPI description of the PP destroy method: takes only the handle. */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_PP_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_PP,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));
0121
/*
 * The PP object type: IDR-backed, torn down via pp_obj_cleanup(),
 * supporting the alloc and destroy methods above.
 */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP,
			    UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC),
			    &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY));
0126
0127
/*
 * QoS UAPI definition chain, merged into the device's uapi tree.  The
 * PP object is exposed only when pp_is_supported() reports the needed
 * device capabilities.
 */
const struct uapi_definition mlx5_ib_qos_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_PP,
		UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)),
	{},
};