Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
0002 /*
0003  * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
0004  */
0005 
0006 #include <rdma/ib_user_verbs.h>
0007 #include <rdma/ib_verbs.h>
0008 #include <rdma/uverbs_types.h>
0009 #include <rdma/uverbs_ioctl.h>
0010 #include <rdma/mlx5_user_ioctl_cmds.h>
0011 #include <rdma/mlx5_user_ioctl_verbs.h>
0012 #include <rdma/ib_umem.h>
0013 #include <rdma/uverbs_std_types.h>
0014 #include <linux/mlx5/driver.h>
0015 #include <linux/mlx5/fs.h>
0016 #include "mlx5_ib.h"
0017 #include "devx.h"
0018 #include "qp.h"
0019 #include <linux/xarray.h>
0020 
0021 #define UVERBS_MODULE_NAME mlx5_ib
0022 #include <rdma/uverbs_named_ioctl.h>
0023 
0024 static void dispatch_event_fd(struct list_head *fd_list, const void *data);
0025 
0026 enum devx_obj_flags {
0027     DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
0028     DEVX_OBJ_FLAGS_DCT = 1 << 1,
0029     DEVX_OBJ_FLAGS_CQ = 1 << 2,
0030 };
0031 
0032 struct devx_async_data {
0033     struct mlx5_ib_dev *mdev;
0034     struct list_head list;
0035     struct devx_async_cmd_event_file *ev_file;
0036     struct mlx5_async_work cb_work;
0037     u16 cmd_out_len;
0038     /* must be last field in this structure */
0039     struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
0040 };
0041 
0042 struct devx_async_event_data {
0043     struct list_head list; /* headed in ev_file->event_list */
0044     struct mlx5_ib_uapi_devx_async_event_hdr hdr;
0045 };
0046 
0047 /* first level XA value data structure */
0048 struct devx_event {
0049     struct xarray object_ids; /* second XA level, Key = object id */
0050     struct list_head unaffiliated_list;
0051 };
0052 
0053 /* second level XA value data structure */
0054 struct devx_obj_event {
0055     struct rcu_head rcu;
0056     struct list_head obj_sub_list;
0057 };
0058 
0059 struct devx_event_subscription {
0060     struct list_head file_list; /* headed in ev_file->
0061                      * subscribed_events_list
0062                      */
0063     struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
0064                    * devx_obj_event->obj_sub_list
0065                    */
0066     struct list_head obj_list; /* headed in devx_object */
0067     struct list_head event_list; /* headed in ev_file->event_list or in
0068                       * temp list via subscription
0069                       */
0070 
0071     u8 is_cleaned:1;
0072     u32 xa_key_level1;
0073     u32 xa_key_level2;
0074     struct rcu_head rcu;
0075     u64 cookie;
0076     struct devx_async_event_file *ev_file;
0077     struct eventfd_ctx *eventfd;
0078 };
0079 
0080 struct devx_async_event_file {
0081     struct ib_uobject uobj;
0082     /* Head of events that are subscribed to this FD */
0083     struct list_head subscribed_events_list;
0084     spinlock_t lock;
0085     wait_queue_head_t poll_wait;
0086     struct list_head event_list;
0087     struct mlx5_ib_dev *dev;
0088     u8 omit_data:1;
0089     u8 is_overflow_err:1;
0090     u8 is_destroyed:1;
0091 };
0092 
0093 struct devx_umem {
0094     struct mlx5_core_dev        *mdev;
0095     struct ib_umem          *umem;
0096     u32             dinlen;
0097     u32             dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
0098 };
0099 
0100 struct devx_umem_reg_cmd {
0101     void                *in;
0102     u32             inlen;
0103     u32             out[MLX5_ST_SZ_DW(create_umem_out)];
0104 };
0105 
0106 static struct mlx5_ib_ucontext *
0107 devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
0108 {
0109     return to_mucontext(ib_uverbs_get_ucontext(attrs));
0110 }
0111 
0112 int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
0113 {
0114     u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
0115     u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
0116     void *uctx;
0117     int err;
0118     u16 uid;
0119     u32 cap = 0;
0120 
0121     /* 0 means not supported */
0122     if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
0123         return -EINVAL;
0124 
0125     uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
0126     if (is_user && capable(CAP_NET_RAW) &&
0127         (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
0128         cap |= MLX5_UCTX_CAP_RAW_TX;
0129     if (is_user && capable(CAP_SYS_RAWIO) &&
0130         (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
0131          MLX5_UCTX_CAP_INTERNAL_DEV_RES))
0132         cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
0133 
0134     MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
0135     MLX5_SET(uctx, uctx, cap, cap);
0136 
0137     err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
0138     if (err)
0139         return err;
0140 
0141     uid = MLX5_GET(create_uctx_out, out, uid);
0142     return uid;
0143 }
0144 
0145 void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
0146 {
0147     u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
0148     u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
0149 
0150     MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
0151     MLX5_SET(destroy_uctx_in, in, uid, uid);
0152 
0153     mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
0154 }
0155 
0156 static bool is_legacy_unaffiliated_event_num(u16 event_num)
0157 {
0158     switch (event_num) {
0159     case MLX5_EVENT_TYPE_PORT_CHANGE:
0160         return true;
0161     default:
0162         return false;
0163     }
0164 }
0165 
0166 static bool is_legacy_obj_event_num(u16 event_num)
0167 {
0168     switch (event_num) {
0169     case MLX5_EVENT_TYPE_PATH_MIG:
0170     case MLX5_EVENT_TYPE_COMM_EST:
0171     case MLX5_EVENT_TYPE_SQ_DRAINED:
0172     case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
0173     case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
0174     case MLX5_EVENT_TYPE_CQ_ERROR:
0175     case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
0176     case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
0177     case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
0178     case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
0179     case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
0180     case MLX5_EVENT_TYPE_DCT_DRAINED:
0181     case MLX5_EVENT_TYPE_COMP:
0182     case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
0183     case MLX5_EVENT_TYPE_XRQ_ERROR:
0184         return true;
0185     default:
0186         return false;
0187     }
0188 }
0189 
0190 static u16 get_legacy_obj_type(u16 opcode)
0191 {
0192     switch (opcode) {
0193     case MLX5_CMD_OP_CREATE_RQ:
0194         return MLX5_EVENT_QUEUE_TYPE_RQ;
0195     case MLX5_CMD_OP_CREATE_QP:
0196         return MLX5_EVENT_QUEUE_TYPE_QP;
0197     case MLX5_CMD_OP_CREATE_SQ:
0198         return MLX5_EVENT_QUEUE_TYPE_SQ;
0199     case MLX5_CMD_OP_CREATE_DCT:
0200         return MLX5_EVENT_QUEUE_TYPE_DCT;
0201     default:
0202         return 0;
0203     }
0204 }
0205 
0206 static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
0207 {
0208     u16 opcode;
0209 
0210     opcode = (obj->obj_id >> 32) & 0xffff;
0211 
0212     if (is_legacy_obj_event_num(event_num))
0213         return get_legacy_obj_type(opcode);
0214 
0215     switch (opcode) {
0216     case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
0217         return (obj->obj_id >> 48);
0218     case MLX5_CMD_OP_CREATE_RQ:
0219         return MLX5_OBJ_TYPE_RQ;
0220     case MLX5_CMD_OP_CREATE_QP:
0221         return MLX5_OBJ_TYPE_QP;
0222     case MLX5_CMD_OP_CREATE_SQ:
0223         return MLX5_OBJ_TYPE_SQ;
0224     case MLX5_CMD_OP_CREATE_DCT:
0225         return MLX5_OBJ_TYPE_DCT;
0226     case MLX5_CMD_OP_CREATE_TIR:
0227         return MLX5_OBJ_TYPE_TIR;
0228     case MLX5_CMD_OP_CREATE_TIS:
0229         return MLX5_OBJ_TYPE_TIS;
0230     case MLX5_CMD_OP_CREATE_PSV:
0231         return MLX5_OBJ_TYPE_PSV;
0232     case MLX5_OBJ_TYPE_MKEY:
0233         return MLX5_OBJ_TYPE_MKEY;
0234     case MLX5_CMD_OP_CREATE_RMP:
0235         return MLX5_OBJ_TYPE_RMP;
0236     case MLX5_CMD_OP_CREATE_XRC_SRQ:
0237         return MLX5_OBJ_TYPE_XRC_SRQ;
0238     case MLX5_CMD_OP_CREATE_XRQ:
0239         return MLX5_OBJ_TYPE_XRQ;
0240     case MLX5_CMD_OP_CREATE_RQT:
0241         return MLX5_OBJ_TYPE_RQT;
0242     case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
0243         return MLX5_OBJ_TYPE_FLOW_COUNTER;
0244     case MLX5_CMD_OP_CREATE_CQ:
0245         return MLX5_OBJ_TYPE_CQ;
0246     default:
0247         return 0;
0248     }
0249 }
0250 
0251 static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
0252 {
0253     switch (event_type) {
0254     case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
0255     case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
0256     case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
0257     case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
0258     case MLX5_EVENT_TYPE_PATH_MIG:
0259     case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
0260     case MLX5_EVENT_TYPE_COMM_EST:
0261     case MLX5_EVENT_TYPE_SQ_DRAINED:
0262     case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
0263     case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
0264         return eqe->data.qp_srq.type;
0265     case MLX5_EVENT_TYPE_CQ_ERROR:
0266     case MLX5_EVENT_TYPE_XRQ_ERROR:
0267         return 0;
0268     case MLX5_EVENT_TYPE_DCT_DRAINED:
0269     case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
0270         return MLX5_EVENT_QUEUE_TYPE_DCT;
0271     default:
0272         return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
0273     }
0274 }
0275 
0276 static u32 get_dec_obj_id(u64 obj_id)
0277 {
0278     return (obj_id & 0xffffffff);
0279 }
0280 
0281 /*
0282  * As the obj_id in the firmware is not globally unique the object type
0283  * must be considered upon checking for a valid object id.
0284  * For that the opcode of the creator command is encoded as part of the obj_id.
0285  */
0286 static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
0287 {
0288     return ((u64)opcode << 32) | obj_id;
0289 }
0290 
0291 static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
0292 {
0293     switch (opcode) {
0294     case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
0295         return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
0296     case MLX5_CMD_OP_CREATE_UMEM:
0297         return MLX5_GET(create_umem_out, out, umem_id);
0298     case MLX5_CMD_OP_CREATE_MKEY:
0299         return MLX5_GET(create_mkey_out, out, mkey_index);
0300     case MLX5_CMD_OP_CREATE_CQ:
0301         return MLX5_GET(create_cq_out, out, cqn);
0302     case MLX5_CMD_OP_ALLOC_PD:
0303         return MLX5_GET(alloc_pd_out, out, pd);
0304     case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
0305         return MLX5_GET(alloc_transport_domain_out, out,
0306                 transport_domain);
0307     case MLX5_CMD_OP_CREATE_RMP:
0308         return MLX5_GET(create_rmp_out, out, rmpn);
0309     case MLX5_CMD_OP_CREATE_SQ:
0310         return MLX5_GET(create_sq_out, out, sqn);
0311     case MLX5_CMD_OP_CREATE_RQ:
0312         return MLX5_GET(create_rq_out, out, rqn);
0313     case MLX5_CMD_OP_CREATE_RQT:
0314         return MLX5_GET(create_rqt_out, out, rqtn);
0315     case MLX5_CMD_OP_CREATE_TIR:
0316         return MLX5_GET(create_tir_out, out, tirn);
0317     case MLX5_CMD_OP_CREATE_TIS:
0318         return MLX5_GET(create_tis_out, out, tisn);
0319     case MLX5_CMD_OP_ALLOC_Q_COUNTER:
0320         return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
0321     case MLX5_CMD_OP_CREATE_FLOW_TABLE:
0322         return MLX5_GET(create_flow_table_out, out, table_id);
0323     case MLX5_CMD_OP_CREATE_FLOW_GROUP:
0324         return MLX5_GET(create_flow_group_out, out, group_id);
0325     case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
0326         return MLX5_GET(set_fte_in, in, flow_index);
0327     case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
0328         return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
0329     case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
0330         return MLX5_GET(alloc_packet_reformat_context_out, out,
0331                 packet_reformat_id);
0332     case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
0333         return MLX5_GET(alloc_modify_header_context_out, out,
0334                 modify_header_id);
0335     case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
0336         return MLX5_GET(create_scheduling_element_out, out,
0337                 scheduling_element_id);
0338     case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
0339         return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
0340     case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
0341         return MLX5_GET(set_l2_table_entry_in, in, table_index);
0342     case MLX5_CMD_OP_CREATE_QP:
0343         return MLX5_GET(create_qp_out, out, qpn);
0344     case MLX5_CMD_OP_CREATE_SRQ:
0345         return MLX5_GET(create_srq_out, out, srqn);
0346     case MLX5_CMD_OP_CREATE_XRC_SRQ:
0347         return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
0348     case MLX5_CMD_OP_CREATE_DCT:
0349         return MLX5_GET(create_dct_out, out, dctn);
0350     case MLX5_CMD_OP_CREATE_XRQ:
0351         return MLX5_GET(create_xrq_out, out, xrqn);
0352     case MLX5_CMD_OP_ATTACH_TO_MCG:
0353         return MLX5_GET(attach_to_mcg_in, in, qpn);
0354     case MLX5_CMD_OP_ALLOC_XRCD:
0355         return MLX5_GET(alloc_xrcd_out, out, xrcd);
0356     case MLX5_CMD_OP_CREATE_PSV:
0357         return MLX5_GET(create_psv_out, out, psv0_index);
0358     default:
0359         /* The entry must match to one of the devx_is_obj_create_cmd */
0360         WARN_ON(true);
0361         return 0;
0362     }
0363 }
0364 
0365 static u64 devx_get_obj_id(const void *in)
0366 {
0367     u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
0368     u64 obj_id;
0369 
0370     switch (opcode) {
0371     case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
0372     case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
0373         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
0374                     MLX5_GET(general_obj_in_cmd_hdr, in,
0375                          obj_type) << 16,
0376                     MLX5_GET(general_obj_in_cmd_hdr, in,
0377                          obj_id));
0378         break;
0379     case MLX5_CMD_OP_QUERY_MKEY:
0380         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
0381                     MLX5_GET(query_mkey_in, in,
0382                          mkey_index));
0383         break;
0384     case MLX5_CMD_OP_QUERY_CQ:
0385         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
0386                     MLX5_GET(query_cq_in, in, cqn));
0387         break;
0388     case MLX5_CMD_OP_MODIFY_CQ:
0389         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
0390                     MLX5_GET(modify_cq_in, in, cqn));
0391         break;
0392     case MLX5_CMD_OP_QUERY_SQ:
0393         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
0394                     MLX5_GET(query_sq_in, in, sqn));
0395         break;
0396     case MLX5_CMD_OP_MODIFY_SQ:
0397         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
0398                     MLX5_GET(modify_sq_in, in, sqn));
0399         break;
0400     case MLX5_CMD_OP_QUERY_RQ:
0401         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
0402                     MLX5_GET(query_rq_in, in, rqn));
0403         break;
0404     case MLX5_CMD_OP_MODIFY_RQ:
0405         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
0406                     MLX5_GET(modify_rq_in, in, rqn));
0407         break;
0408     case MLX5_CMD_OP_QUERY_RMP:
0409         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
0410                     MLX5_GET(query_rmp_in, in, rmpn));
0411         break;
0412     case MLX5_CMD_OP_MODIFY_RMP:
0413         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
0414                     MLX5_GET(modify_rmp_in, in, rmpn));
0415         break;
0416     case MLX5_CMD_OP_QUERY_RQT:
0417         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
0418                     MLX5_GET(query_rqt_in, in, rqtn));
0419         break;
0420     case MLX5_CMD_OP_MODIFY_RQT:
0421         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
0422                     MLX5_GET(modify_rqt_in, in, rqtn));
0423         break;
0424     case MLX5_CMD_OP_QUERY_TIR:
0425         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
0426                     MLX5_GET(query_tir_in, in, tirn));
0427         break;
0428     case MLX5_CMD_OP_MODIFY_TIR:
0429         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
0430                     MLX5_GET(modify_tir_in, in, tirn));
0431         break;
0432     case MLX5_CMD_OP_QUERY_TIS:
0433         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
0434                     MLX5_GET(query_tis_in, in, tisn));
0435         break;
0436     case MLX5_CMD_OP_MODIFY_TIS:
0437         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
0438                     MLX5_GET(modify_tis_in, in, tisn));
0439         break;
0440     case MLX5_CMD_OP_QUERY_FLOW_TABLE:
0441         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
0442                     MLX5_GET(query_flow_table_in, in,
0443                          table_id));
0444         break;
0445     case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
0446         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
0447                     MLX5_GET(modify_flow_table_in, in,
0448                          table_id));
0449         break;
0450     case MLX5_CMD_OP_QUERY_FLOW_GROUP:
0451         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
0452                     MLX5_GET(query_flow_group_in, in,
0453                          group_id));
0454         break;
0455     case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
0456         obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
0457                     MLX5_GET(query_fte_in, in,
0458                          flow_index));
0459         break;
0460     case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
0461         obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
0462                     MLX5_GET(set_fte_in, in, flow_index));
0463         break;
0464     case MLX5_CMD_OP_QUERY_Q_COUNTER:
0465         obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
0466                     MLX5_GET(query_q_counter_in, in,
0467                          counter_set_id));
0468         break;
0469     case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
0470         obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
0471                     MLX5_GET(query_flow_counter_in, in,
0472                          flow_counter_id));
0473         break;
0474     case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
0475         obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
0476                     MLX5_GET(query_modify_header_context_in,
0477                          in, modify_header_id));
0478         break;
0479     case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
0480         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
0481                     MLX5_GET(query_scheduling_element_in,
0482                          in, scheduling_element_id));
0483         break;
0484     case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
0485         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
0486                     MLX5_GET(modify_scheduling_element_in,
0487                          in, scheduling_element_id));
0488         break;
0489     case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
0490         obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
0491                     MLX5_GET(add_vxlan_udp_dport_in, in,
0492                          vxlan_udp_port));
0493         break;
0494     case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
0495         obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
0496                     MLX5_GET(query_l2_table_entry_in, in,
0497                          table_index));
0498         break;
0499     case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
0500         obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
0501                     MLX5_GET(set_l2_table_entry_in, in,
0502                          table_index));
0503         break;
0504     case MLX5_CMD_OP_QUERY_QP:
0505         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0506                     MLX5_GET(query_qp_in, in, qpn));
0507         break;
0508     case MLX5_CMD_OP_RST2INIT_QP:
0509         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0510                     MLX5_GET(rst2init_qp_in, in, qpn));
0511         break;
0512     case MLX5_CMD_OP_INIT2INIT_QP:
0513         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0514                     MLX5_GET(init2init_qp_in, in, qpn));
0515         break;
0516     case MLX5_CMD_OP_INIT2RTR_QP:
0517         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0518                     MLX5_GET(init2rtr_qp_in, in, qpn));
0519         break;
0520     case MLX5_CMD_OP_RTR2RTS_QP:
0521         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0522                     MLX5_GET(rtr2rts_qp_in, in, qpn));
0523         break;
0524     case MLX5_CMD_OP_RTS2RTS_QP:
0525         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0526                     MLX5_GET(rts2rts_qp_in, in, qpn));
0527         break;
0528     case MLX5_CMD_OP_SQERR2RTS_QP:
0529         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0530                     MLX5_GET(sqerr2rts_qp_in, in, qpn));
0531         break;
0532     case MLX5_CMD_OP_2ERR_QP:
0533         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0534                     MLX5_GET(qp_2err_in, in, qpn));
0535         break;
0536     case MLX5_CMD_OP_2RST_QP:
0537         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0538                     MLX5_GET(qp_2rst_in, in, qpn));
0539         break;
0540     case MLX5_CMD_OP_QUERY_DCT:
0541         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
0542                     MLX5_GET(query_dct_in, in, dctn));
0543         break;
0544     case MLX5_CMD_OP_QUERY_XRQ:
0545     case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
0546     case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
0547         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
0548                     MLX5_GET(query_xrq_in, in, xrqn));
0549         break;
0550     case MLX5_CMD_OP_QUERY_XRC_SRQ:
0551         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
0552                     MLX5_GET(query_xrc_srq_in, in,
0553                          xrc_srqn));
0554         break;
0555     case MLX5_CMD_OP_ARM_XRC_SRQ:
0556         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
0557                     MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
0558         break;
0559     case MLX5_CMD_OP_QUERY_SRQ:
0560         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
0561                     MLX5_GET(query_srq_in, in, srqn));
0562         break;
0563     case MLX5_CMD_OP_ARM_RQ:
0564         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
0565                     MLX5_GET(arm_rq_in, in, srq_number));
0566         break;
0567     case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
0568         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
0569                     MLX5_GET(drain_dct_in, in, dctn));
0570         break;
0571     case MLX5_CMD_OP_ARM_XRQ:
0572     case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
0573     case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
0574     case MLX5_CMD_OP_MODIFY_XRQ:
0575         obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
0576                     MLX5_GET(arm_xrq_in, in, xrqn));
0577         break;
0578     case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
0579         obj_id = get_enc_obj_id
0580                 (MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
0581                  MLX5_GET(query_packet_reformat_context_in,
0582                       in, packet_reformat_id));
0583         break;
0584     default:
0585         obj_id = 0;
0586     }
0587 
0588     return obj_id;
0589 }
0590 
0591 static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
0592                  struct ib_uobject *uobj, const void *in)
0593 {
0594     struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
0595     u64 obj_id = devx_get_obj_id(in);
0596 
0597     if (!obj_id)
0598         return false;
0599 
0600     switch (uobj_get_object_id(uobj)) {
0601     case UVERBS_OBJECT_CQ:
0602         return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
0603                       to_mcq(uobj->object)->mcq.cqn) ==
0604                       obj_id;
0605 
0606     case UVERBS_OBJECT_SRQ:
0607     {
0608         struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
0609         u16 opcode;
0610 
0611         switch (srq->common.res) {
0612         case MLX5_RES_XSRQ:
0613             opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
0614             break;
0615         case MLX5_RES_XRQ:
0616             opcode = MLX5_CMD_OP_CREATE_XRQ;
0617             break;
0618         default:
0619             if (!dev->mdev->issi)
0620                 opcode = MLX5_CMD_OP_CREATE_SRQ;
0621             else
0622                 opcode = MLX5_CMD_OP_CREATE_RMP;
0623         }
0624 
0625         return get_enc_obj_id(opcode,
0626                       to_msrq(uobj->object)->msrq.srqn) ==
0627                       obj_id;
0628     }
0629 
0630     case UVERBS_OBJECT_QP:
0631     {
0632         struct mlx5_ib_qp *qp = to_mqp(uobj->object);
0633 
0634         if (qp->type == IB_QPT_RAW_PACKET ||
0635             (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
0636             struct mlx5_ib_raw_packet_qp *raw_packet_qp =
0637                              &qp->raw_packet_qp;
0638             struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
0639             struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
0640 
0641             return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
0642                            rq->base.mqp.qpn) == obj_id ||
0643                 get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
0644                            sq->base.mqp.qpn) == obj_id ||
0645                 get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
0646                            rq->tirn) == obj_id ||
0647                 get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
0648                            sq->tisn) == obj_id);
0649         }
0650 
0651         if (qp->type == MLX5_IB_QPT_DCT)
0652             return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
0653                           qp->dct.mdct.mqp.qpn) == obj_id;
0654         return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
0655                       qp->ibqp.qp_num) == obj_id;
0656     }
0657 
0658     case UVERBS_OBJECT_WQ:
0659         return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
0660                       to_mrwq(uobj->object)->core_qp.qpn) ==
0661                       obj_id;
0662 
0663     case UVERBS_OBJECT_RWQ_IND_TBL:
0664         return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
0665                       to_mrwq_ind_table(uobj->object)->rqtn) ==
0666                       obj_id;
0667 
0668     case MLX5_IB_OBJECT_DEVX_OBJ:
0669         return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
0670 
0671     default:
0672         return false;
0673     }
0674 }
0675 
0676 static void devx_set_umem_valid(const void *in)
0677 {
0678     u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
0679 
0680     switch (opcode) {
0681     case MLX5_CMD_OP_CREATE_MKEY:
0682         MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
0683         break;
0684     case MLX5_CMD_OP_CREATE_CQ:
0685     {
0686         void *cqc;
0687 
0688         MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
0689         cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
0690         MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
0691         break;
0692     }
0693     case MLX5_CMD_OP_CREATE_QP:
0694     {
0695         void *qpc;
0696 
0697         qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
0698         MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
0699         MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
0700         break;
0701     }
0702 
0703     case MLX5_CMD_OP_CREATE_RQ:
0704     {
0705         void *rqc, *wq;
0706 
0707         rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
0708         wq  = MLX5_ADDR_OF(rqc, rqc, wq);
0709         MLX5_SET(wq, wq, dbr_umem_valid, 1);
0710         MLX5_SET(wq, wq, wq_umem_valid, 1);
0711         break;
0712     }
0713 
0714     case MLX5_CMD_OP_CREATE_SQ:
0715     {
0716         void *sqc, *wq;
0717 
0718         sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
0719         wq = MLX5_ADDR_OF(sqc, sqc, wq);
0720         MLX5_SET(wq, wq, dbr_umem_valid, 1);
0721         MLX5_SET(wq, wq, wq_umem_valid, 1);
0722         break;
0723     }
0724 
0725     case MLX5_CMD_OP_MODIFY_CQ:
0726         MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
0727         break;
0728 
0729     case MLX5_CMD_OP_CREATE_RMP:
0730     {
0731         void *rmpc, *wq;
0732 
0733         rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
0734         wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
0735         MLX5_SET(wq, wq, dbr_umem_valid, 1);
0736         MLX5_SET(wq, wq, wq_umem_valid, 1);
0737         break;
0738     }
0739 
0740     case MLX5_CMD_OP_CREATE_XRQ:
0741     {
0742         void *xrqc, *wq;
0743 
0744         xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
0745         wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
0746         MLX5_SET(wq, wq, dbr_umem_valid, 1);
0747         MLX5_SET(wq, wq, wq_umem_valid, 1);
0748         break;
0749     }
0750 
0751     case MLX5_CMD_OP_CREATE_XRC_SRQ:
0752     {
0753         void *xrc_srqc;
0754 
0755         MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
0756         xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
0757                     xrc_srq_context_entry);
0758         MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
0759         break;
0760     }
0761 
0762     default:
0763         return;
0764     }
0765 }
0766 
0767 static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
0768 {
0769     *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
0770 
0771     switch (*opcode) {
0772     case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
0773     case MLX5_CMD_OP_CREATE_MKEY:
0774     case MLX5_CMD_OP_CREATE_CQ:
0775     case MLX5_CMD_OP_ALLOC_PD:
0776     case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
0777     case MLX5_CMD_OP_CREATE_RMP:
0778     case MLX5_CMD_OP_CREATE_SQ:
0779     case MLX5_CMD_OP_CREATE_RQ:
0780     case MLX5_CMD_OP_CREATE_RQT:
0781     case MLX5_CMD_OP_CREATE_TIR:
0782     case MLX5_CMD_OP_CREATE_TIS:
0783     case MLX5_CMD_OP_ALLOC_Q_COUNTER:
0784     case MLX5_CMD_OP_CREATE_FLOW_TABLE:
0785     case MLX5_CMD_OP_CREATE_FLOW_GROUP:
0786     case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
0787     case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
0788     case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
0789     case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
0790     case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
0791     case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
0792     case MLX5_CMD_OP_CREATE_QP:
0793     case MLX5_CMD_OP_CREATE_SRQ:
0794     case MLX5_CMD_OP_CREATE_XRC_SRQ:
0795     case MLX5_CMD_OP_CREATE_DCT:
0796     case MLX5_CMD_OP_CREATE_XRQ:
0797     case MLX5_CMD_OP_ATTACH_TO_MCG:
0798     case MLX5_CMD_OP_ALLOC_XRCD:
0799         return true;
0800     case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
0801     {
0802         u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
0803         if (op_mod == 0)
0804             return true;
0805         return false;
0806     }
0807     case MLX5_CMD_OP_CREATE_PSV:
0808     {
0809         u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
0810 
0811         if (num_psv == 1)
0812             return true;
0813         return false;
0814     }
0815     default:
0816         return false;
0817     }
0818 }
0819 
0820 static bool devx_is_obj_modify_cmd(const void *in)
0821 {
0822     u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
0823 
0824     switch (opcode) {
0825     case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
0826     case MLX5_CMD_OP_MODIFY_CQ:
0827     case MLX5_CMD_OP_MODIFY_RMP:
0828     case MLX5_CMD_OP_MODIFY_SQ:
0829     case MLX5_CMD_OP_MODIFY_RQ:
0830     case MLX5_CMD_OP_MODIFY_RQT:
0831     case MLX5_CMD_OP_MODIFY_TIR:
0832     case MLX5_CMD_OP_MODIFY_TIS:
0833     case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
0834     case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
0835     case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
0836     case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
0837     case MLX5_CMD_OP_RST2INIT_QP:
0838     case MLX5_CMD_OP_INIT2RTR_QP:
0839     case MLX5_CMD_OP_INIT2INIT_QP:
0840     case MLX5_CMD_OP_RTR2RTS_QP:
0841     case MLX5_CMD_OP_RTS2RTS_QP:
0842     case MLX5_CMD_OP_SQERR2RTS_QP:
0843     case MLX5_CMD_OP_2ERR_QP:
0844     case MLX5_CMD_OP_2RST_QP:
0845     case MLX5_CMD_OP_ARM_XRC_SRQ:
0846     case MLX5_CMD_OP_ARM_RQ:
0847     case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
0848     case MLX5_CMD_OP_ARM_XRQ:
0849     case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
0850     case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
0851     case MLX5_CMD_OP_MODIFY_XRQ:
0852         return true;
0853     case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
0854     {
0855         u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
0856 
0857         if (op_mod == 1)
0858             return true;
0859         return false;
0860     }
0861     default:
0862         return false;
0863     }
0864 }
0865 
0866 static bool devx_is_obj_query_cmd(const void *in)
0867 {
0868     u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
0869 
0870     switch (opcode) {
0871     case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
0872     case MLX5_CMD_OP_QUERY_MKEY:
0873     case MLX5_CMD_OP_QUERY_CQ:
0874     case MLX5_CMD_OP_QUERY_RMP:
0875     case MLX5_CMD_OP_QUERY_SQ:
0876     case MLX5_CMD_OP_QUERY_RQ:
0877     case MLX5_CMD_OP_QUERY_RQT:
0878     case MLX5_CMD_OP_QUERY_TIR:
0879     case MLX5_CMD_OP_QUERY_TIS:
0880     case MLX5_CMD_OP_QUERY_Q_COUNTER:
0881     case MLX5_CMD_OP_QUERY_FLOW_TABLE:
0882     case MLX5_CMD_OP_QUERY_FLOW_GROUP:
0883     case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
0884     case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
0885     case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
0886     case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
0887     case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
0888     case MLX5_CMD_OP_QUERY_QP:
0889     case MLX5_CMD_OP_QUERY_SRQ:
0890     case MLX5_CMD_OP_QUERY_XRC_SRQ:
0891     case MLX5_CMD_OP_QUERY_DCT:
0892     case MLX5_CMD_OP_QUERY_XRQ:
0893     case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
0894     case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
0895     case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
0896         return true;
0897     default:
0898         return false;
0899     }
0900 }
0901 
0902 static bool devx_is_whitelist_cmd(void *in)
0903 {
0904     u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
0905 
0906     switch (opcode) {
0907     case MLX5_CMD_OP_QUERY_HCA_CAP:
0908     case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
0909     case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
0910         return true;
0911     default:
0912         return false;
0913     }
0914 }
0915 
0916 static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
0917 {
0918     if (devx_is_whitelist_cmd(cmd_in)) {
0919         struct mlx5_ib_dev *dev;
0920 
0921         if (c->devx_uid)
0922             return c->devx_uid;
0923 
0924         dev = to_mdev(c->ibucontext.device);
0925         if (dev->devx_whitelist_uid)
0926             return dev->devx_whitelist_uid;
0927 
0928         return -EOPNOTSUPP;
0929     }
0930 
0931     if (!c->devx_uid)
0932         return -EINVAL;
0933 
0934     return c->devx_uid;
0935 }
0936 
0937 static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
0938 {
0939     u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
0940 
0941     /* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
0942     if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
0943          MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
0944         (opcode >= MLX5_CMD_OP_GENERAL_START &&
0945          opcode < MLX5_CMD_OP_GENERAL_END))
0946         return true;
0947 
0948     switch (opcode) {
0949     case MLX5_CMD_OP_QUERY_HCA_CAP:
0950     case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
0951     case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
0952     case MLX5_CMD_OP_QUERY_VPORT_STATE:
0953     case MLX5_CMD_OP_QUERY_ADAPTER:
0954     case MLX5_CMD_OP_QUERY_ISSI:
0955     case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
0956     case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
0957     case MLX5_CMD_OP_QUERY_VNIC_ENV:
0958     case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
0959     case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
0960     case MLX5_CMD_OP_NOP:
0961     case MLX5_CMD_OP_QUERY_CONG_STATUS:
0962     case MLX5_CMD_OP_QUERY_CONG_PARAMS:
0963     case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
0964     case MLX5_CMD_OP_QUERY_LAG:
0965         return true;
0966     default:
0967         return false;
0968     }
0969 }
0970 
0971 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
0972     struct uverbs_attr_bundle *attrs)
0973 {
0974     struct mlx5_ib_ucontext *c;
0975     struct mlx5_ib_dev *dev;
0976     int user_vector;
0977     int dev_eqn;
0978     int err;
0979 
0980     if (uverbs_copy_from(&user_vector, attrs,
0981                  MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
0982         return -EFAULT;
0983 
0984     c = devx_ufile2uctx(attrs);
0985     if (IS_ERR(c))
0986         return PTR_ERR(c);
0987     dev = to_mdev(c->ibucontext.device);
0988 
0989     err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
0990     if (err < 0)
0991         return err;
0992 
0993     if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
0994                &dev_eqn, sizeof(dev_eqn)))
0995         return -EFAULT;
0996 
0997     return 0;
0998 }
0999 
1000 /*
1001  *Security note:
1002  * The hardware protection mechanism works like this: Each device object that
1003  * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
1004  * the device specification manual) upon its creation. Then upon doorbell,
1005  * hardware fetches the object context for which the doorbell was rang, and
1006  * validates that the UAR through which the DB was rang matches the UAR ID
1007  * of the object.
1008  * If no match the doorbell is silently ignored by the hardware. Of course,
1009  * the user cannot ring a doorbell on a UAR that was not mapped to it.
1010  * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
1011  * mailboxes (except tagging them with UID), we expose to the user its UAR
1012  * ID, so it can embed it in these objects in the expected specification
1013  * format. So the only thing the user can do is hurt itself by creating a
1014  * QP/SQ/CQ with a UAR ID other than his, and then in this case other users
1015  * may ring a doorbell on its objects.
1016  * The consequence of that will be that another user can schedule a QP/SQ
1017  * of the buggy user for execution (just insert it to the hardware schedule
1018  * queue or arm its CQ for event generation), no further harm is expected.
1019  */
1020 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
1021     struct uverbs_attr_bundle *attrs)
1022 {
1023     struct mlx5_ib_ucontext *c;
1024     struct mlx5_ib_dev *dev;
1025     u32 user_idx;
1026     s32 dev_idx;
1027 
1028     c = devx_ufile2uctx(attrs);
1029     if (IS_ERR(c))
1030         return PTR_ERR(c);
1031     dev = to_mdev(c->ibucontext.device);
1032 
1033     if (uverbs_copy_from(&user_idx, attrs,
1034                  MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
1035         return -EFAULT;
1036 
1037     dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
1038     if (dev_idx < 0)
1039         return dev_idx;
1040 
1041     if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1042                &dev_idx, sizeof(dev_idx)))
1043         return -EFAULT;
1044 
1045     return 0;
1046 }
1047 
1048 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
1049     struct uverbs_attr_bundle *attrs)
1050 {
1051     struct mlx5_ib_ucontext *c;
1052     struct mlx5_ib_dev *dev;
1053     void *cmd_in = uverbs_attr_get_alloced_ptr(
1054         attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
1055     int cmd_out_len = uverbs_attr_get_len(attrs,
1056                     MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
1057     void *cmd_out;
1058     int err, err2;
1059     int uid;
1060 
1061     c = devx_ufile2uctx(attrs);
1062     if (IS_ERR(c))
1063         return PTR_ERR(c);
1064     dev = to_mdev(c->ibucontext.device);
1065 
1066     uid = devx_get_uid(c, cmd_in);
1067     if (uid < 0)
1068         return uid;
1069 
1070     /* Only white list of some general HCA commands are allowed for this method. */
1071     if (!devx_is_general_cmd(cmd_in, dev))
1072         return -EINVAL;
1073 
1074     cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1075     if (IS_ERR(cmd_out))
1076         return PTR_ERR(cmd_out);
1077 
1078     MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1079     err = mlx5_cmd_do(dev->mdev, cmd_in,
1080               uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
1081               cmd_out, cmd_out_len);
1082     if (err && err != -EREMOTEIO)
1083         return err;
1084 
1085     err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
1086                   cmd_out_len);
1087 
1088     return err2 ?: err;
1089 }
1090 
1091 static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1092                        u32 *dinlen,
1093                        u32 *obj_id)
1094 {
1095     u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
1096     u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1097 
1098     *obj_id = devx_get_created_obj_id(in, out, opcode);
1099     *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1100     MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1101 
1102     switch (opcode) {
1103     case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
1104         MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
1105         MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1106         MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
1107              MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
1108         break;
1109 
1110     case MLX5_CMD_OP_CREATE_UMEM:
1111         MLX5_SET(destroy_umem_in, din, opcode,
1112              MLX5_CMD_OP_DESTROY_UMEM);
1113         MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
1114         break;
1115     case MLX5_CMD_OP_CREATE_MKEY:
1116         MLX5_SET(destroy_mkey_in, din, opcode,
1117              MLX5_CMD_OP_DESTROY_MKEY);
1118         MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
1119         break;
1120     case MLX5_CMD_OP_CREATE_CQ:
1121         MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1122         MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
1123         break;
1124     case MLX5_CMD_OP_ALLOC_PD:
1125         MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1126         MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
1127         break;
1128     case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1129         MLX5_SET(dealloc_transport_domain_in, din, opcode,
1130              MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1131         MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
1132              *obj_id);
1133         break;
1134     case MLX5_CMD_OP_CREATE_RMP:
1135         MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1136         MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
1137         break;
1138     case MLX5_CMD_OP_CREATE_SQ:
1139         MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1140         MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
1141         break;
1142     case MLX5_CMD_OP_CREATE_RQ:
1143         MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1144         MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
1145         break;
1146     case MLX5_CMD_OP_CREATE_RQT:
1147         MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1148         MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
1149         break;
1150     case MLX5_CMD_OP_CREATE_TIR:
1151         MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1152         MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
1153         break;
1154     case MLX5_CMD_OP_CREATE_TIS:
1155         MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1156         MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
1157         break;
1158     case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1159         MLX5_SET(dealloc_q_counter_in, din, opcode,
1160              MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1161         MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
1162         break;
1163     case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1164         *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1165         MLX5_SET(destroy_flow_table_in, din, other_vport,
1166              MLX5_GET(create_flow_table_in,  in, other_vport));
1167         MLX5_SET(destroy_flow_table_in, din, vport_number,
1168              MLX5_GET(create_flow_table_in,  in, vport_number));
1169         MLX5_SET(destroy_flow_table_in, din, table_type,
1170              MLX5_GET(create_flow_table_in,  in, table_type));
1171         MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1172         MLX5_SET(destroy_flow_table_in, din, opcode,
1173              MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1174         break;
1175     case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1176         *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1177         MLX5_SET(destroy_flow_group_in, din, other_vport,
1178              MLX5_GET(create_flow_group_in, in, other_vport));
1179         MLX5_SET(destroy_flow_group_in, din, vport_number,
1180              MLX5_GET(create_flow_group_in, in, vport_number));
1181         MLX5_SET(destroy_flow_group_in, din, table_type,
1182              MLX5_GET(create_flow_group_in, in, table_type));
1183         MLX5_SET(destroy_flow_group_in, din, table_id,
1184              MLX5_GET(create_flow_group_in, in, table_id));
1185         MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1186         MLX5_SET(destroy_flow_group_in, din, opcode,
1187              MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1188         break;
1189     case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1190         *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1191         MLX5_SET(delete_fte_in, din, other_vport,
1192              MLX5_GET(set_fte_in,  in, other_vport));
1193         MLX5_SET(delete_fte_in, din, vport_number,
1194              MLX5_GET(set_fte_in, in, vport_number));
1195         MLX5_SET(delete_fte_in, din, table_type,
1196              MLX5_GET(set_fte_in, in, table_type));
1197         MLX5_SET(delete_fte_in, din, table_id,
1198              MLX5_GET(set_fte_in, in, table_id));
1199         MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1200         MLX5_SET(delete_fte_in, din, opcode,
1201              MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1202         break;
1203     case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1204         MLX5_SET(dealloc_flow_counter_in, din, opcode,
1205              MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1206         MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
1207              *obj_id);
1208         break;
1209     case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
1210         MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
1211              MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
1212         MLX5_SET(dealloc_packet_reformat_context_in, din,
1213              packet_reformat_id, *obj_id);
1214         break;
1215     case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
1216         MLX5_SET(dealloc_modify_header_context_in, din, opcode,
1217              MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
1218         MLX5_SET(dealloc_modify_header_context_in, din,
1219              modify_header_id, *obj_id);
1220         break;
1221     case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
1222         *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
1223         MLX5_SET(destroy_scheduling_element_in, din,
1224              scheduling_hierarchy,
1225              MLX5_GET(create_scheduling_element_in, in,
1226                   scheduling_hierarchy));
1227         MLX5_SET(destroy_scheduling_element_in, din,
1228              scheduling_element_id, *obj_id);
1229         MLX5_SET(destroy_scheduling_element_in, din, opcode,
1230              MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
1231         break;
1232     case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
1233         *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
1234         MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
1235         MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
1236              MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
1237         break;
1238     case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
1239         *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
1240         MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
1241         MLX5_SET(delete_l2_table_entry_in, din, opcode,
1242              MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
1243         break;
1244     case MLX5_CMD_OP_CREATE_QP:
1245         MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
1246         MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
1247         break;
1248     case MLX5_CMD_OP_CREATE_SRQ:
1249         MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
1250         MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
1251         break;
1252     case MLX5_CMD_OP_CREATE_XRC_SRQ:
1253         MLX5_SET(destroy_xrc_srq_in, din, opcode,
1254              MLX5_CMD_OP_DESTROY_XRC_SRQ);
1255         MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
1256         break;
1257     case MLX5_CMD_OP_CREATE_DCT:
1258         MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
1259         MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
1260         break;
1261     case MLX5_CMD_OP_CREATE_XRQ:
1262         MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
1263         MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
1264         break;
1265     case MLX5_CMD_OP_ATTACH_TO_MCG:
1266         *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
1267         MLX5_SET(detach_from_mcg_in, din, qpn,
1268              MLX5_GET(attach_to_mcg_in, in, qpn));
1269         memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
1270                MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
1271                MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
1272         MLX5_SET(detach_from_mcg_in, din, opcode,
1273              MLX5_CMD_OP_DETACH_FROM_MCG);
1274         MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
1275         break;
1276     case MLX5_CMD_OP_ALLOC_XRCD:
1277         MLX5_SET(dealloc_xrcd_in, din, opcode,
1278              MLX5_CMD_OP_DEALLOC_XRCD);
1279         MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
1280         break;
1281     case MLX5_CMD_OP_CREATE_PSV:
1282         MLX5_SET(destroy_psv_in, din, opcode,
1283              MLX5_CMD_OP_DESTROY_PSV);
1284         MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
1285         break;
1286     default:
1287         /* The entry must match to one of the devx_is_obj_create_cmd */
1288         WARN_ON(true);
1289         break;
1290     }
1291 }
1292 
1293 static int devx_handle_mkey_indirect(struct devx_obj *obj,
1294                      struct mlx5_ib_dev *dev,
1295                      void *in, void *out)
1296 {
1297     struct mlx5_ib_mkey *mkey = &obj->mkey;
1298     void *mkc;
1299     u8 key;
1300 
1301     mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1302     key = MLX5_GET(mkc, mkc, mkey_7_0);
1303     mkey->key = mlx5_idx_to_mkey(
1304             MLX5_GET(create_mkey_out, out, mkey_index)) | key;
1305     mkey->type = MLX5_MKEY_INDIRECT_DEVX;
1306     mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
1307     init_waitqueue_head(&mkey->wait);
1308 
1309     return mlx5r_store_odp_mkey(dev, mkey);
1310 }
1311 
1312 static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1313                    struct devx_obj *obj,
1314                    void *in, int in_len)
1315 {
1316     int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1317             MLX5_FLD_SZ_BYTES(create_mkey_in,
1318             memory_key_mkey_entry);
1319     void *mkc;
1320     u8 access_mode;
1321 
1322     if (in_len < min_len)
1323         return -EINVAL;
1324 
1325     mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1326 
1327     access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
1328     access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1329 
1330     if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
1331         access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
1332         if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1333             obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
1334         return 0;
1335     }
1336 
1337     MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1338     return 0;
1339 }
1340 
1341 static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
1342                       struct devx_event_subscription *sub)
1343 {
1344     struct devx_event *event;
1345     struct devx_obj_event *xa_val_level2;
1346 
1347     if (sub->is_cleaned)
1348         return;
1349 
1350     sub->is_cleaned = 1;
1351     list_del_rcu(&sub->xa_list);
1352 
1353     if (list_empty(&sub->obj_list))
1354         return;
1355 
1356     list_del_rcu(&sub->obj_list);
1357     /* check whether key level 1 for this obj_sub_list is empty */
1358     event = xa_load(&dev->devx_event_table.event_xa,
1359             sub->xa_key_level1);
1360     WARN_ON(!event);
1361 
1362     xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
1363     if (list_empty(&xa_val_level2->obj_sub_list)) {
1364         xa_erase(&event->object_ids,
1365              sub->xa_key_level2);
1366         kfree_rcu(xa_val_level2, rcu);
1367     }
1368 }
1369 
1370 static int devx_obj_cleanup(struct ib_uobject *uobject,
1371                 enum rdma_remove_reason why,
1372                 struct uverbs_attr_bundle *attrs)
1373 {
1374     u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1375     struct mlx5_devx_event_table *devx_event_table;
1376     struct devx_obj *obj = uobject->object;
1377     struct devx_event_subscription *sub_entry, *tmp;
1378     struct mlx5_ib_dev *dev;
1379     int ret;
1380 
1381     dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1382     if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
1383         xa_erase(&obj->ib_dev->odp_mkeys,
1384              mlx5_base_mkey(obj->mkey.key)))
1385         /*
1386          * The pagefault_single_data_segment() does commands against
1387          * the mmkey, we must wait for that to stop before freeing the
1388          * mkey, as another allocation could get the same mkey #.
1389          */
1390         mlx5r_deref_wait_odp_mkey(&obj->mkey);
1391 
1392     if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1393         ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1394     else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1395         ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1396     else
1397         ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
1398                     obj->dinlen, out, sizeof(out));
1399     if (ret)
1400         return ret;
1401 
1402     devx_event_table = &dev->devx_event_table;
1403 
1404     mutex_lock(&devx_event_table->event_xa_lock);
1405     list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
1406         devx_cleanup_subscription(dev, sub_entry);
1407     mutex_unlock(&devx_event_table->event_xa_lock);
1408 
1409     kfree(obj);
1410     return ret;
1411 }
1412 
1413 static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
1414 {
1415     struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
1416     struct mlx5_devx_event_table *table;
1417     struct devx_event *event;
1418     struct devx_obj_event *obj_event;
1419     u32 obj_id = mcq->cqn;
1420 
1421     table = &obj->ib_dev->devx_event_table;
1422     rcu_read_lock();
1423     event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
1424     if (!event)
1425         goto out;
1426 
1427     obj_event = xa_load(&event->object_ids, obj_id);
1428     if (!obj_event)
1429         goto out;
1430 
1431     dispatch_event_fd(&obj_event->obj_sub_list, eqe);
1432 out:
1433     rcu_read_unlock();
1434 }
1435 
1436 static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
1437 {
1438     if (!MLX5_CAP_GEN(dev->mdev, apu) ||
1439         !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
1440         return false;
1441 
1442     return true;
1443 }
1444 
1445 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1446     struct uverbs_attr_bundle *attrs)
1447 {
1448     void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1449     int cmd_out_len =  uverbs_attr_get_len(attrs,
1450                     MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
1451     int cmd_in_len = uverbs_attr_get_len(attrs,
1452                     MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1453     void *cmd_out;
1454     struct ib_uobject *uobj = uverbs_attr_get_uobject(
1455         attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
1456     struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1457         &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1458     struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1459     u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1460     struct devx_obj *obj;
1461     u16 obj_type = 0;
1462     int err, err2 = 0;
1463     int uid;
1464     u32 obj_id;
1465     u16 opcode;
1466 
1467     if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1468         return -EINVAL;
1469 
1470     uid = devx_get_uid(c, cmd_in);
1471     if (uid < 0)
1472         return uid;
1473 
1474     if (!devx_is_obj_create_cmd(cmd_in, &opcode))
1475         return -EINVAL;
1476 
1477     cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1478     if (IS_ERR(cmd_out))
1479         return PTR_ERR(cmd_out);
1480 
1481     obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
1482     if (!obj)
1483         return -ENOMEM;
1484 
1485     MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1486     if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
1487         err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
1488         if (err)
1489             goto obj_free;
1490     } else {
1491         devx_set_umem_valid(cmd_in);
1492     }
1493 
1494     if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1495         obj->flags |= DEVX_OBJ_FLAGS_DCT;
1496         err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
1497                        cmd_in_len, cmd_out, cmd_out_len);
1498     } else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
1499            !is_apu_cq(dev, cmd_in)) {
1500         obj->flags |= DEVX_OBJ_FLAGS_CQ;
1501         obj->core_cq.comp = devx_cq_comp;
1502         err = mlx5_create_cq(dev->mdev, &obj->core_cq,
1503                      cmd_in, cmd_in_len, cmd_out,
1504                      cmd_out_len);
1505     } else {
1506         err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
1507                   cmd_out, cmd_out_len);
1508     }
1509 
1510     if (err == -EREMOTEIO)
1511         err2 = uverbs_copy_to(attrs,
1512                       MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
1513                       cmd_out, cmd_out_len);
1514     if (err)
1515         goto obj_free;
1516 
1517     if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
1518         u8 bulk = MLX5_GET(alloc_flow_counter_in,
1519                    cmd_in,
1520                    flow_counter_bulk);
1521         obj->flow_counter_bulk_size = 128UL * bulk;
1522     }
1523 
1524     uobj->object = obj;
1525     INIT_LIST_HEAD(&obj->event_sub);
1526     obj->ib_dev = dev;
1527     devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
1528                    &obj_id);
1529     WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
1530 
1531     err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
1532     if (err)
1533         goto obj_destroy;
1534 
1535     if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
1536         obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
1537     obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
1538 
1539     if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1540         err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
1541         if (err)
1542             goto obj_destroy;
1543     }
1544     return 0;
1545 
1546 obj_destroy:
1547     if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1548         mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1549     else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1550         mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1551     else
1552         mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
1553                   sizeof(out));
1554 obj_free:
1555     kfree(obj);
1556     return err2 ?: err;
1557 }
1558 
1559 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
1560     struct uverbs_attr_bundle *attrs)
1561 {
1562     void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1563     int cmd_out_len = uverbs_attr_get_len(attrs,
1564                     MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1565     struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1566                               MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
1567     struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1568         &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1569     struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1570     void *cmd_out;
1571     int err, err2;
1572     int uid;
1573 
1574     if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1575         return -EINVAL;
1576 
1577     uid = devx_get_uid(c, cmd_in);
1578     if (uid < 0)
1579         return uid;
1580 
1581     if (!devx_is_obj_modify_cmd(cmd_in))
1582         return -EINVAL;
1583 
1584     if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1585         return -EINVAL;
1586 
1587     cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1588     if (IS_ERR(cmd_out))
1589         return PTR_ERR(cmd_out);
1590 
1591     MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1592     devx_set_umem_valid(cmd_in);
1593 
1594     err = mlx5_cmd_do(mdev->mdev, cmd_in,
1595               uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1596               cmd_out, cmd_out_len);
1597     if (err && err != -EREMOTEIO)
1598         return err;
1599 
1600     err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1601                   cmd_out, cmd_out_len);
1602 
1603     return err2 ?: err;
1604 }
1605 
1606 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
1607     struct uverbs_attr_bundle *attrs)
1608 {
1609     void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1610     int cmd_out_len = uverbs_attr_get_len(attrs,
1611                           MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1612     struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1613                               MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
1614     struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1615         &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1616     void *cmd_out;
1617     int err, err2;
1618     int uid;
1619     struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1620 
1621     if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1622         return -EINVAL;
1623 
1624     uid = devx_get_uid(c, cmd_in);
1625     if (uid < 0)
1626         return uid;
1627 
1628     if (!devx_is_obj_query_cmd(cmd_in))
1629         return -EINVAL;
1630 
1631     if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1632         return -EINVAL;
1633 
1634     cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1635     if (IS_ERR(cmd_out))
1636         return PTR_ERR(cmd_out);
1637 
1638     MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1639     err = mlx5_cmd_do(mdev->mdev, cmd_in,
1640               uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1641               cmd_out, cmd_out_len);
1642     if (err && err != -EREMOTEIO)
1643         return err;
1644 
1645     err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1646                   cmd_out, cmd_out_len);
1647 
1648     return err2 ?: err;
1649 }
1650 
1651 struct devx_async_event_queue {
1652     spinlock_t      lock;
1653     wait_queue_head_t   poll_wait;
1654     struct list_head    event_list;
1655     atomic_t        bytes_in_use;
1656     u8          is_destroyed:1;
1657 };
1658 
1659 struct devx_async_cmd_event_file {
1660     struct ib_uobject       uobj;
1661     struct devx_async_event_queue   ev_queue;
1662     struct mlx5_async_ctx       async_ctx;
1663 };
1664 
1665 static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1666 {
1667     spin_lock_init(&ev_queue->lock);
1668     INIT_LIST_HEAD(&ev_queue->event_list);
1669     init_waitqueue_head(&ev_queue->poll_wait);
1670     atomic_set(&ev_queue->bytes_in_use, 0);
1671     ev_queue->is_destroyed = 0;
1672 }
1673 
1674 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1675     struct uverbs_attr_bundle *attrs)
1676 {
1677     struct devx_async_cmd_event_file *ev_file;
1678 
1679     struct ib_uobject *uobj = uverbs_attr_get_uobject(
1680         attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
1681     struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
1682 
1683     ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1684                    uobj);
1685     devx_init_event_queue(&ev_file->ev_queue);
1686     mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1687     return 0;
1688 }
1689 
1690 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
1691     struct uverbs_attr_bundle *attrs)
1692 {
1693     struct ib_uobject *uobj = uverbs_attr_get_uobject(
1694         attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
1695     struct devx_async_event_file *ev_file;
1696     struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1697         &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1698     struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1699     u32 flags;
1700     int err;
1701 
1702     err = uverbs_get_flags32(&flags, attrs,
1703         MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
1704         MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
1705 
1706     if (err)
1707         return err;
1708 
1709     ev_file = container_of(uobj, struct devx_async_event_file,
1710                    uobj);
1711     spin_lock_init(&ev_file->lock);
1712     INIT_LIST_HEAD(&ev_file->event_list);
1713     init_waitqueue_head(&ev_file->poll_wait);
1714     if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
1715         ev_file->omit_data = 1;
1716     INIT_LIST_HEAD(&ev_file->subscribed_events_list);
1717     ev_file->dev = dev;
1718     get_device(&dev->ib_dev.dev);
1719     return 0;
1720 }
1721 
1722 static void devx_query_callback(int status, struct mlx5_async_work *context)
1723 {
1724     struct devx_async_data *async_data =
1725         container_of(context, struct devx_async_data, cb_work);
1726     struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
1727     struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
1728     unsigned long flags;
1729 
1730     /*
1731      * Note that if the struct devx_async_cmd_event_file uobj begins to be
1732      * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
1733      * routine returns, ensuring that it always remains valid here.
1734      */
1735     spin_lock_irqsave(&ev_queue->lock, flags);
1736     list_add_tail(&async_data->list, &ev_queue->event_list);
1737     spin_unlock_irqrestore(&ev_queue->lock, flags);
1738 
1739     wake_up_interruptible(&ev_queue->poll_wait);
1740 }
1741 
1742 #define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1743 
1744 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
1745     struct uverbs_attr_bundle *attrs)
1746 {
1747     void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
1748                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
1749     struct ib_uobject *uobj = uverbs_attr_get_uobject(
1750                 attrs,
1751                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
1752     u16 cmd_out_len;
1753     struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1754         &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1755     struct ib_uobject *fd_uobj;
1756     int err;
1757     int uid;
1758     struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1759     struct devx_async_cmd_event_file *ev_file;
1760     struct devx_async_data *async_data;
1761 
1762     if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1763         return -EINVAL;
1764 
1765     uid = devx_get_uid(c, cmd_in);
1766     if (uid < 0)
1767         return uid;
1768 
1769     if (!devx_is_obj_query_cmd(cmd_in))
1770         return -EINVAL;
1771 
1772     err = uverbs_get_const(&cmd_out_len, attrs,
1773                    MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
1774     if (err)
1775         return err;
1776 
1777     if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1778         return -EINVAL;
1779 
1780     fd_uobj = uverbs_attr_get_uobject(attrs,
1781                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
1782     if (IS_ERR(fd_uobj))
1783         return PTR_ERR(fd_uobj);
1784 
1785     ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1786                    uobj);
1787 
1788     if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
1789             MAX_ASYNC_BYTES_IN_USE) {
1790         atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1791         return -EAGAIN;
1792     }
1793 
1794     async_data = kvzalloc(struct_size(async_data, hdr.out_data,
1795                       cmd_out_len), GFP_KERNEL);
1796     if (!async_data) {
1797         err = -ENOMEM;
1798         goto sub_bytes;
1799     }
1800 
1801     err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
1802                    MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
1803     if (err)
1804         goto free_async;
1805 
1806     async_data->cmd_out_len = cmd_out_len;
1807     async_data->mdev = mdev;
1808     async_data->ev_file = ev_file;
1809 
1810     MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1811     err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
1812             uverbs_attr_get_len(attrs,
1813                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
1814             async_data->hdr.out_data,
1815             async_data->cmd_out_len,
1816             devx_query_callback, &async_data->cb_work);
1817 
1818     if (err)
1819         goto free_async;
1820 
1821     return 0;
1822 
1823 free_async:
1824     kvfree(async_data);
1825 sub_bytes:
1826     atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1827     return err;
1828 }
1829 
1830 static void
1831 subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
1832                u32 key_level1,
1833                bool is_level2,
1834                u32 key_level2)
1835 {
1836     struct devx_event *event;
1837     struct devx_obj_event *xa_val_level2;
1838 
1839     /* Level 1 is valid for future use, no need to free */
1840     if (!is_level2)
1841         return;
1842 
1843     event = xa_load(&devx_event_table->event_xa, key_level1);
1844     WARN_ON(!event);
1845 
1846     xa_val_level2 = xa_load(&event->object_ids,
1847                 key_level2);
1848     if (list_empty(&xa_val_level2->obj_sub_list)) {
1849         xa_erase(&event->object_ids,
1850              key_level2);
1851         kfree_rcu(xa_val_level2, rcu);
1852     }
1853 }
1854 
1855 static int
1856 subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
1857              u32 key_level1,
1858              bool is_level2,
1859              u32 key_level2)
1860 {
1861     struct devx_obj_event *obj_event;
1862     struct devx_event *event;
1863     int err;
1864 
1865     event = xa_load(&devx_event_table->event_xa, key_level1);
1866     if (!event) {
1867         event = kzalloc(sizeof(*event), GFP_KERNEL);
1868         if (!event)
1869             return -ENOMEM;
1870 
1871         INIT_LIST_HEAD(&event->unaffiliated_list);
1872         xa_init(&event->object_ids);
1873 
1874         err = xa_insert(&devx_event_table->event_xa,
1875                 key_level1,
1876                 event,
1877                 GFP_KERNEL);
1878         if (err) {
1879             kfree(event);
1880             return err;
1881         }
1882     }
1883 
1884     if (!is_level2)
1885         return 0;
1886 
1887     obj_event = xa_load(&event->object_ids, key_level2);
1888     if (!obj_event) {
1889         obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
1890         if (!obj_event)
1891             /* Level1 is valid for future use, no need to free */
1892             return -ENOMEM;
1893 
1894         err = xa_insert(&event->object_ids,
1895                 key_level2,
1896                 obj_event,
1897                 GFP_KERNEL);
1898         if (err) {
1899             kfree(obj_event);
1900             return err;
1901         }
1902         INIT_LIST_HEAD(&obj_event->obj_sub_list);
1903     }
1904 
1905     return 0;
1906 }
1907 
1908 static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
1909                    struct devx_obj *obj)
1910 {
1911     int i;
1912 
1913     for (i = 0; i < num_events; i++) {
1914         if (obj) {
1915             if (!is_legacy_obj_event_num(event_type_num_list[i]))
1916                 return false;
1917         } else if (!is_legacy_unaffiliated_event_num(
1918                 event_type_num_list[i])) {
1919             return false;
1920         }
1921     }
1922 
1923     return true;
1924 }
1925 
1926 #define MAX_SUPP_EVENT_NUM 255
1927 static bool is_valid_events(struct mlx5_core_dev *dev,
1928                 int num_events, u16 *event_type_num_list,
1929                 struct devx_obj *obj)
1930 {
1931     __be64 *aff_events;
1932     __be64 *unaff_events;
1933     int mask_entry;
1934     int mask_bit;
1935     int i;
1936 
1937     if (MLX5_CAP_GEN(dev, event_cap)) {
1938         aff_events = MLX5_CAP_DEV_EVENT(dev,
1939                         user_affiliated_events);
1940         unaff_events = MLX5_CAP_DEV_EVENT(dev,
1941                           user_unaffiliated_events);
1942     } else {
1943         return is_valid_events_legacy(num_events, event_type_num_list,
1944                           obj);
1945     }
1946 
1947     for (i = 0; i < num_events; i++) {
1948         if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
1949             return false;
1950 
1951         mask_entry = event_type_num_list[i] / 64;
1952         mask_bit = event_type_num_list[i] % 64;
1953 
1954         if (obj) {
1955             /* CQ completion */
1956             if (event_type_num_list[i] == 0)
1957                 continue;
1958 
1959             if (!(be64_to_cpu(aff_events[mask_entry]) &
1960                     (1ull << mask_bit)))
1961                 return false;
1962 
1963             continue;
1964         }
1965 
1966         if (!(be64_to_cpu(unaff_events[mask_entry]) &
1967                 (1ull << mask_bit)))
1968             return false;
1969     }
1970 
1971     return true;
1972 }
1973 
1974 #define MAX_NUM_EVENTS 16
1975 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1976     struct uverbs_attr_bundle *attrs)
1977 {
1978     struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
1979                 attrs,
1980                 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
1981     struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1982         &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1983     struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1984     struct ib_uobject *fd_uobj;
1985     struct devx_obj *obj = NULL;
1986     struct devx_async_event_file *ev_file;
1987     struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
1988     u16 *event_type_num_list;
1989     struct devx_event_subscription *event_sub, *tmp_sub;
1990     struct list_head sub_list;
1991     int redirect_fd;
1992     bool use_eventfd = false;
1993     int num_events;
1994     int num_alloc_xa_entries = 0;
1995     u16 obj_type = 0;
1996     u64 cookie = 0;
1997     u32 obj_id = 0;
1998     int err;
1999     int i;
2000 
2001     if (!c->devx_uid)
2002         return -EINVAL;
2003 
2004     if (!IS_ERR(devx_uobj)) {
2005         obj = (struct devx_obj *)devx_uobj->object;
2006         if (obj)
2007             obj_id = get_dec_obj_id(obj->obj_id);
2008     }
2009 
2010     fd_uobj = uverbs_attr_get_uobject(attrs,
2011                 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
2012     if (IS_ERR(fd_uobj))
2013         return PTR_ERR(fd_uobj);
2014 
2015     ev_file = container_of(fd_uobj, struct devx_async_event_file,
2016                    uobj);
2017 
2018     if (uverbs_attr_is_valid(attrs,
2019                  MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
2020         err = uverbs_copy_from(&redirect_fd, attrs,
2021                    MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
2022         if (err)
2023             return err;
2024 
2025         use_eventfd = true;
2026     }
2027 
2028     if (uverbs_attr_is_valid(attrs,
2029                  MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
2030         if (use_eventfd)
2031             return -EINVAL;
2032 
2033         err = uverbs_copy_from(&cookie, attrs,
2034                 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
2035         if (err)
2036             return err;
2037     }
2038 
2039     num_events = uverbs_attr_ptr_get_array_size(
2040         attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2041         sizeof(u16));
2042 
2043     if (num_events < 0)
2044         return num_events;
2045 
2046     if (num_events > MAX_NUM_EVENTS)
2047         return -EINVAL;
2048 
2049     event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
2050             MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
2051 
2052     if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
2053         return -EINVAL;
2054 
2055     INIT_LIST_HEAD(&sub_list);
2056 
2057     /* Protect from concurrent subscriptions to same XA entries to allow
2058      * both to succeed
2059      */
2060     mutex_lock(&devx_event_table->event_xa_lock);
2061     for (i = 0; i < num_events; i++) {
2062         u32 key_level1;
2063 
2064         if (obj)
2065             obj_type = get_dec_obj_type(obj,
2066                             event_type_num_list[i]);
2067         key_level1 = event_type_num_list[i] | obj_type << 16;
2068 
2069         err = subscribe_event_xa_alloc(devx_event_table,
2070                            key_level1,
2071                            obj,
2072                            obj_id);
2073         if (err)
2074             goto err;
2075 
2076         num_alloc_xa_entries++;
2077         event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
2078         if (!event_sub) {
2079             err = -ENOMEM;
2080             goto err;
2081         }
2082 
2083         list_add_tail(&event_sub->event_list, &sub_list);
2084         uverbs_uobject_get(&ev_file->uobj);
2085         if (use_eventfd) {
2086             event_sub->eventfd =
2087                 eventfd_ctx_fdget(redirect_fd);
2088 
2089             if (IS_ERR(event_sub->eventfd)) {
2090                 err = PTR_ERR(event_sub->eventfd);
2091                 event_sub->eventfd = NULL;
2092                 goto err;
2093             }
2094         }
2095 
2096         event_sub->cookie = cookie;
2097         event_sub->ev_file = ev_file;
2098         /* May be needed upon cleanup the devx object/subscription */
2099         event_sub->xa_key_level1 = key_level1;
2100         event_sub->xa_key_level2 = obj_id;
2101         INIT_LIST_HEAD(&event_sub->obj_list);
2102     }
2103 
2104     /* Once all the allocations and the XA data insertions were done we
2105      * can go ahead and add all the subscriptions to the relevant lists
2106      * without concern of a failure.
2107      */
2108     list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2109         struct devx_event *event;
2110         struct devx_obj_event *obj_event;
2111 
2112         list_del_init(&event_sub->event_list);
2113 
2114         spin_lock_irq(&ev_file->lock);
2115         list_add_tail_rcu(&event_sub->file_list,
2116                   &ev_file->subscribed_events_list);
2117         spin_unlock_irq(&ev_file->lock);
2118 
2119         event = xa_load(&devx_event_table->event_xa,
2120                 event_sub->xa_key_level1);
2121         WARN_ON(!event);
2122 
2123         if (!obj) {
2124             list_add_tail_rcu(&event_sub->xa_list,
2125                       &event->unaffiliated_list);
2126             continue;
2127         }
2128 
2129         obj_event = xa_load(&event->object_ids, obj_id);
2130         WARN_ON(!obj_event);
2131         list_add_tail_rcu(&event_sub->xa_list,
2132                   &obj_event->obj_sub_list);
2133         list_add_tail_rcu(&event_sub->obj_list,
2134                   &obj->event_sub);
2135     }
2136 
2137     mutex_unlock(&devx_event_table->event_xa_lock);
2138     return 0;
2139 
2140 err:
2141     list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2142         list_del(&event_sub->event_list);
2143 
2144         subscribe_event_xa_dealloc(devx_event_table,
2145                        event_sub->xa_key_level1,
2146                        obj,
2147                        obj_id);
2148 
2149         if (event_sub->eventfd)
2150             eventfd_ctx_put(event_sub->eventfd);
2151         uverbs_uobject_put(&event_sub->ev_file->uobj);
2152         kfree(event_sub);
2153     }
2154 
2155     mutex_unlock(&devx_event_table->event_xa_lock);
2156     return err;
2157 }
2158 
2159 static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2160              struct uverbs_attr_bundle *attrs,
2161              struct devx_umem *obj)
2162 {
2163     u64 addr;
2164     size_t size;
2165     u32 access;
2166     int err;
2167 
2168     if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
2169         uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
2170         return -EFAULT;
2171 
2172     err = uverbs_get_flags32(&access, attrs,
2173                  MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2174                  IB_ACCESS_LOCAL_WRITE |
2175                  IB_ACCESS_REMOTE_WRITE |
2176                  IB_ACCESS_REMOTE_READ);
2177     if (err)
2178         return err;
2179 
2180     err = ib_check_mr_access(&dev->ib_dev, access);
2181     if (err)
2182         return err;
2183 
2184     obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
2185     if (IS_ERR(obj->umem))
2186         return PTR_ERR(obj->umem);
2187     return 0;
2188 }
2189 
2190 static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
2191                            unsigned long pgsz_bitmap)
2192 {
2193     unsigned long page_size;
2194 
2195     /* Don't bother checking larger page sizes as offset must be zero and
2196      * total DEVX umem length must be equal to total umem length.
2197      */
2198     pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
2199                      PAGE_SHIFT),
2200                    MLX5_ADAPTER_PAGE_SHIFT);
2201     if (!pgsz_bitmap)
2202         return 0;
2203 
2204     page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
2205     if (!page_size)
2206         return 0;
2207 
2208     /* If the page_size is less than the CPU page size then we can use the
2209      * offset and create a umem which is a subset of the page list.
2210      * For larger page sizes we can't be sure the DMA  list reflects the
2211      * VA so we must ensure that the umem extent is exactly equal to the
2212      * page list. Reduce the page size until one of these cases is true.
2213      */
2214     while ((ib_umem_dma_offset(umem, page_size) != 0 ||
2215         (umem->length % page_size) != 0) &&
2216         page_size > PAGE_SIZE)
2217         page_size /= 2;
2218 
2219     return page_size;
2220 }
2221 
2222 static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
2223                    struct uverbs_attr_bundle *attrs,
2224                    struct devx_umem *obj,
2225                    struct devx_umem_reg_cmd *cmd)
2226 {
2227     unsigned long pgsz_bitmap;
2228     unsigned int page_size;
2229     __be64 *mtt;
2230     void *umem;
2231     int ret;
2232 
2233     /*
2234      * If the user does not pass in pgsz_bitmap then the user promises not
2235      * to use umem_offset!=0 in any commands that allocate on top of the
2236      * umem.
2237      *
2238      * If the user wants to use a umem_offset then it must pass in
2239      * pgsz_bitmap which guides the maximum page size and thus maximum
2240      * object alignment inside the umem. See the PRM.
2241      *
2242      * Users are not allowed to use IOVA here, mkeys are not supported on
2243      * umem.
2244      */
2245     ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
2246             MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2247             GENMASK_ULL(63,
2248                     min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
2249     if (ret)
2250         return ret;
2251 
2252     page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
2253     if (!page_size)
2254         return -EINVAL;
2255 
2256     cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2257              (MLX5_ST_SZ_BYTES(mtt) *
2258               ib_umem_num_dma_blocks(obj->umem, page_size));
2259     cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2260     if (IS_ERR(cmd->in))
2261         return PTR_ERR(cmd->in);
2262 
2263     umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2264     mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2265 
2266     MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2267     MLX5_SET64(umem, umem, num_of_mtt,
2268            ib_umem_num_dma_blocks(obj->umem, page_size));
2269     MLX5_SET(umem, umem, log_page_size,
2270          order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
2271     MLX5_SET(umem, umem, page_offset,
2272          ib_umem_dma_offset(obj->umem, page_size));
2273 
2274     mlx5_ib_populate_pas(obj->umem, page_size, mtt,
2275                  (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
2276                      MLX5_IB_MTT_READ);
2277     return 0;
2278 }
2279 
2280 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2281     struct uverbs_attr_bundle *attrs)
2282 {
2283     struct devx_umem_reg_cmd cmd;
2284     struct devx_umem *obj;
2285     struct ib_uobject *uobj = uverbs_attr_get_uobject(
2286         attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2287     u32 obj_id;
2288     struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2289         &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2290     struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2291     int err;
2292 
2293     if (!c->devx_uid)
2294         return -EINVAL;
2295 
2296     obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
2297     if (!obj)
2298         return -ENOMEM;
2299 
2300     err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
2301     if (err)
2302         goto err_obj_free;
2303 
2304     err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
2305     if (err)
2306         goto err_umem_release;
2307 
2308     MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
2309     err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
2310                 sizeof(cmd.out));
2311     if (err)
2312         goto err_umem_release;
2313 
2314     obj->mdev = dev->mdev;
2315     uobj->object = obj;
2316     devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
2317     uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2318 
2319     err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
2320                  sizeof(obj_id));
2321     return err;
2322 
2323 err_umem_release:
2324     ib_umem_release(obj->umem);
2325 err_obj_free:
2326     kfree(obj);
2327     return err;
2328 }
2329 
2330 static int devx_umem_cleanup(struct ib_uobject *uobject,
2331                  enum rdma_remove_reason why,
2332                  struct uverbs_attr_bundle *attrs)
2333 {
2334     struct devx_umem *obj = uobject->object;
2335     u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
2336     int err;
2337 
2338     err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
2339     if (err)
2340         return err;
2341 
2342     ib_umem_release(obj->umem);
2343     kfree(obj);
2344     return 0;
2345 }
2346 
2347 static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2348                   unsigned long event_type)
2349 {
2350     __be64 *unaff_events;
2351     int mask_entry;
2352     int mask_bit;
2353 
2354     if (!MLX5_CAP_GEN(dev, event_cap))
2355         return is_legacy_unaffiliated_event_num(event_type);
2356 
2357     unaff_events = MLX5_CAP_DEV_EVENT(dev,
2358                       user_unaffiliated_events);
2359     WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2360 
2361     mask_entry = event_type / 64;
2362     mask_bit = event_type % 64;
2363 
2364     if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
2365         return false;
2366 
2367     return true;
2368 }
2369 
2370 static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
2371 {
2372     struct mlx5_eqe *eqe = data;
2373     u32 obj_id = 0;
2374 
2375     switch (event_type) {
2376     case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2377     case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2378     case MLX5_EVENT_TYPE_PATH_MIG:
2379     case MLX5_EVENT_TYPE_COMM_EST:
2380     case MLX5_EVENT_TYPE_SQ_DRAINED:
2381     case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2382     case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2383     case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2384     case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2385     case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
2386         obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
2387         break;
2388     case MLX5_EVENT_TYPE_XRQ_ERROR:
2389         obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
2390         break;
2391     case MLX5_EVENT_TYPE_DCT_DRAINED:
2392     case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
2393         obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
2394         break;
2395     case MLX5_EVENT_TYPE_CQ_ERROR:
2396         obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2397         break;
2398     default:
2399         obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
2400         break;
2401     }
2402 
2403     return obj_id;
2404 }
2405 
2406 static int deliver_event(struct devx_event_subscription *event_sub,
2407              const void *data)
2408 {
2409     struct devx_async_event_file *ev_file;
2410     struct devx_async_event_data *event_data;
2411     unsigned long flags;
2412 
2413     ev_file = event_sub->ev_file;
2414 
2415     if (ev_file->omit_data) {
2416         spin_lock_irqsave(&ev_file->lock, flags);
2417         if (!list_empty(&event_sub->event_list) ||
2418             ev_file->is_destroyed) {
2419             spin_unlock_irqrestore(&ev_file->lock, flags);
2420             return 0;
2421         }
2422 
2423         list_add_tail(&event_sub->event_list, &ev_file->event_list);
2424         spin_unlock_irqrestore(&ev_file->lock, flags);
2425         wake_up_interruptible(&ev_file->poll_wait);
2426         return 0;
2427     }
2428 
2429     event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
2430                  GFP_ATOMIC);
2431     if (!event_data) {
2432         spin_lock_irqsave(&ev_file->lock, flags);
2433         ev_file->is_overflow_err = 1;
2434         spin_unlock_irqrestore(&ev_file->lock, flags);
2435         return -ENOMEM;
2436     }
2437 
2438     event_data->hdr.cookie = event_sub->cookie;
2439     memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
2440 
2441     spin_lock_irqsave(&ev_file->lock, flags);
2442     if (!ev_file->is_destroyed)
2443         list_add_tail(&event_data->list, &ev_file->event_list);
2444     else
2445         kfree(event_data);
2446     spin_unlock_irqrestore(&ev_file->lock, flags);
2447     wake_up_interruptible(&ev_file->poll_wait);
2448 
2449     return 0;
2450 }
2451 
2452 static void dispatch_event_fd(struct list_head *fd_list,
2453                   const void *data)
2454 {
2455     struct devx_event_subscription *item;
2456 
2457     list_for_each_entry_rcu(item, fd_list, xa_list) {
2458         if (item->eventfd)
2459             eventfd_signal(item->eventfd, 1);
2460         else
2461             deliver_event(item, data);
2462     }
2463 }
2464 
2465 static int devx_event_notifier(struct notifier_block *nb,
2466                    unsigned long event_type, void *data)
2467 {
2468     struct mlx5_devx_event_table *table;
2469     struct mlx5_ib_dev *dev;
2470     struct devx_event *event;
2471     struct devx_obj_event *obj_event;
2472     u16 obj_type = 0;
2473     bool is_unaffiliated;
2474     u32 obj_id;
2475 
2476     /* Explicit filtering to kernel events which may occur frequently */
2477     if (event_type == MLX5_EVENT_TYPE_CMD ||
2478         event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
2479         return NOTIFY_OK;
2480 
2481     table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
2482     dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
2483     is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2484 
2485     if (!is_unaffiliated)
2486         obj_type = get_event_obj_type(event_type, data);
2487 
2488     rcu_read_lock();
2489     event = xa_load(&table->event_xa, event_type | (obj_type << 16));
2490     if (!event) {
2491         rcu_read_unlock();
2492         return NOTIFY_DONE;
2493     }
2494 
2495     if (is_unaffiliated) {
2496         dispatch_event_fd(&event->unaffiliated_list, data);
2497         rcu_read_unlock();
2498         return NOTIFY_OK;
2499     }
2500 
2501     obj_id = devx_get_obj_id_from_event(event_type, data);
2502     obj_event = xa_load(&event->object_ids, obj_id);
2503     if (!obj_event) {
2504         rcu_read_unlock();
2505         return NOTIFY_DONE;
2506     }
2507 
2508     dispatch_event_fd(&obj_event->obj_sub_list, data);
2509 
2510     rcu_read_unlock();
2511     return NOTIFY_OK;
2512 }
2513 
2514 int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
2515 {
2516     struct mlx5_devx_event_table *table = &dev->devx_event_table;
2517     int uid;
2518 
2519     uid = mlx5_ib_devx_create(dev, false);
2520     if (uid > 0) {
2521         dev->devx_whitelist_uid = uid;
2522         xa_init(&table->event_xa);
2523         mutex_init(&table->event_xa_lock);
2524         MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
2525         mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
2526     }
2527 
2528     return 0;
2529 }
2530 
2531 void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
2532 {
2533     struct mlx5_devx_event_table *table = &dev->devx_event_table;
2534     struct devx_event_subscription *sub, *tmp;
2535     struct devx_event *event;
2536     void *entry;
2537     unsigned long id;
2538 
2539     if (dev->devx_whitelist_uid) {
2540         mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
2541         mutex_lock(&dev->devx_event_table.event_xa_lock);
2542         xa_for_each(&table->event_xa, id, entry) {
2543             event = entry;
2544             list_for_each_entry_safe(
2545                 sub, tmp, &event->unaffiliated_list, xa_list)
2546                 devx_cleanup_subscription(dev, sub);
2547             kfree(entry);
2548         }
2549         mutex_unlock(&dev->devx_event_table.event_xa_lock);
2550         xa_destroy(&table->event_xa);
2551 
2552         mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
2553     }
2554 }
2555 
2556 static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2557                      size_t count, loff_t *pos)
2558 {
2559     struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2560     struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2561     struct devx_async_data *event;
2562     int ret = 0;
2563     size_t eventsz;
2564 
2565     spin_lock_irq(&ev_queue->lock);
2566 
2567     while (list_empty(&ev_queue->event_list)) {
2568         spin_unlock_irq(&ev_queue->lock);
2569 
2570         if (filp->f_flags & O_NONBLOCK)
2571             return -EAGAIN;
2572 
2573         if (wait_event_interruptible(
2574                 ev_queue->poll_wait,
2575                 (!list_empty(&ev_queue->event_list) ||
2576                  ev_queue->is_destroyed))) {
2577             return -ERESTARTSYS;
2578         }
2579 
2580         spin_lock_irq(&ev_queue->lock);
2581         if (ev_queue->is_destroyed) {
2582             spin_unlock_irq(&ev_queue->lock);
2583             return -EIO;
2584         }
2585     }
2586 
2587     event = list_entry(ev_queue->event_list.next,
2588                struct devx_async_data, list);
2589     eventsz = event->cmd_out_len +
2590             sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
2591 
2592     if (eventsz > count) {
2593         spin_unlock_irq(&ev_queue->lock);
2594         return -ENOSPC;
2595     }
2596 
2597     list_del(ev_queue->event_list.next);
2598     spin_unlock_irq(&ev_queue->lock);
2599 
2600     if (copy_to_user(buf, &event->hdr, eventsz))
2601         ret = -EFAULT;
2602     else
2603         ret = eventsz;
2604 
2605     atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
2606     kvfree(event);
2607     return ret;
2608 }
2609 
2610 static __poll_t devx_async_cmd_event_poll(struct file *filp,
2611                           struct poll_table_struct *wait)
2612 {
2613     struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2614     struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2615     __poll_t pollflags = 0;
2616 
2617     poll_wait(filp, &ev_queue->poll_wait, wait);
2618 
2619     spin_lock_irq(&ev_queue->lock);
2620     if (ev_queue->is_destroyed)
2621         pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2622     else if (!list_empty(&ev_queue->event_list))
2623         pollflags = EPOLLIN | EPOLLRDNORM;
2624     spin_unlock_irq(&ev_queue->lock);
2625 
2626     return pollflags;
2627 }
2628 
2629 static const struct file_operations devx_async_cmd_event_fops = {
2630     .owner   = THIS_MODULE,
2631     .read    = devx_async_cmd_event_read,
2632     .poll    = devx_async_cmd_event_poll,
2633     .release = uverbs_uobject_fd_release,
2634     .llseek  = no_llseek,
2635 };
2636 
2637 static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2638                      size_t count, loff_t *pos)
2639 {
2640     struct devx_async_event_file *ev_file = filp->private_data;
2641     struct devx_event_subscription *event_sub;
2642     struct devx_async_event_data *event;
2643     int ret = 0;
2644     size_t eventsz;
2645     bool omit_data;
2646     void *event_data;
2647 
2648     omit_data = ev_file->omit_data;
2649 
2650     spin_lock_irq(&ev_file->lock);
2651 
2652     if (ev_file->is_overflow_err) {
2653         ev_file->is_overflow_err = 0;
2654         spin_unlock_irq(&ev_file->lock);
2655         return -EOVERFLOW;
2656     }
2657 
2658 
2659     while (list_empty(&ev_file->event_list)) {
2660         spin_unlock_irq(&ev_file->lock);
2661 
2662         if (filp->f_flags & O_NONBLOCK)
2663             return -EAGAIN;
2664 
2665         if (wait_event_interruptible(ev_file->poll_wait,
2666                 (!list_empty(&ev_file->event_list) ||
2667                  ev_file->is_destroyed))) {
2668             return -ERESTARTSYS;
2669         }
2670 
2671         spin_lock_irq(&ev_file->lock);
2672         if (ev_file->is_destroyed) {
2673             spin_unlock_irq(&ev_file->lock);
2674             return -EIO;
2675         }
2676     }
2677 
2678     if (omit_data) {
2679         event_sub = list_first_entry(&ev_file->event_list,
2680                     struct devx_event_subscription,
2681                     event_list);
2682         eventsz = sizeof(event_sub->cookie);
2683         event_data = &event_sub->cookie;
2684     } else {
2685         event = list_first_entry(&ev_file->event_list,
2686                       struct devx_async_event_data, list);
2687         eventsz = sizeof(struct mlx5_eqe) +
2688             sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
2689         event_data = &event->hdr;
2690     }
2691 
2692     if (eventsz > count) {
2693         spin_unlock_irq(&ev_file->lock);
2694         return -EINVAL;
2695     }
2696 
2697     if (omit_data)
2698         list_del_init(&event_sub->event_list);
2699     else
2700         list_del(&event->list);
2701 
2702     spin_unlock_irq(&ev_file->lock);
2703 
2704     if (copy_to_user(buf, event_data, eventsz))
2705         /* This points to an application issue, not a kernel concern */
2706         ret = -EFAULT;
2707     else
2708         ret = eventsz;
2709 
2710     if (!omit_data)
2711         kfree(event);
2712     return ret;
2713 }
2714 
2715 static __poll_t devx_async_event_poll(struct file *filp,
2716                       struct poll_table_struct *wait)
2717 {
2718     struct devx_async_event_file *ev_file = filp->private_data;
2719     __poll_t pollflags = 0;
2720 
2721     poll_wait(filp, &ev_file->poll_wait, wait);
2722 
2723     spin_lock_irq(&ev_file->lock);
2724     if (ev_file->is_destroyed)
2725         pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2726     else if (!list_empty(&ev_file->event_list))
2727         pollflags = EPOLLIN | EPOLLRDNORM;
2728     spin_unlock_irq(&ev_file->lock);
2729 
2730     return pollflags;
2731 }
2732 
2733 static void devx_free_subscription(struct rcu_head *rcu)
2734 {
2735     struct devx_event_subscription *event_sub =
2736         container_of(rcu, struct devx_event_subscription, rcu);
2737 
2738     if (event_sub->eventfd)
2739         eventfd_ctx_put(event_sub->eventfd);
2740     uverbs_uobject_put(&event_sub->ev_file->uobj);
2741     kfree(event_sub);
2742 }
2743 
2744 static const struct file_operations devx_async_event_fops = {
2745     .owner   = THIS_MODULE,
2746     .read    = devx_async_event_read,
2747     .poll    = devx_async_event_poll,
2748     .release = uverbs_uobject_fd_release,
2749     .llseek  = no_llseek,
2750 };
2751 
2752 static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
2753                           enum rdma_remove_reason why)
2754 {
2755     struct devx_async_cmd_event_file *comp_ev_file =
2756         container_of(uobj, struct devx_async_cmd_event_file,
2757                  uobj);
2758     struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2759     struct devx_async_data *entry, *tmp;
2760 
2761     spin_lock_irq(&ev_queue->lock);
2762     ev_queue->is_destroyed = 1;
2763     spin_unlock_irq(&ev_queue->lock);
2764     wake_up_interruptible(&ev_queue->poll_wait);
2765 
2766     mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2767 
2768     spin_lock_irq(&comp_ev_file->ev_queue.lock);
2769     list_for_each_entry_safe(entry, tmp,
2770                  &comp_ev_file->ev_queue.event_list, list) {
2771         list_del(&entry->list);
2772         kvfree(entry);
2773     }
2774     spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2775 };
2776 
2777 static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
2778                       enum rdma_remove_reason why)
2779 {
2780     struct devx_async_event_file *ev_file =
2781         container_of(uobj, struct devx_async_event_file,
2782                  uobj);
2783     struct devx_event_subscription *event_sub, *event_sub_tmp;
2784     struct mlx5_ib_dev *dev = ev_file->dev;
2785 
2786     spin_lock_irq(&ev_file->lock);
2787     ev_file->is_destroyed = 1;
2788 
2789     /* free the pending events allocation */
2790     if (ev_file->omit_data) {
2791         struct devx_event_subscription *event_sub, *tmp;
2792 
2793         list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
2794                      event_list)
2795             list_del_init(&event_sub->event_list);
2796 
2797     } else {
2798         struct devx_async_event_data *entry, *tmp;
2799 
2800         list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
2801                      list) {
2802             list_del(&entry->list);
2803             kfree(entry);
2804         }
2805     }
2806 
2807     spin_unlock_irq(&ev_file->lock);
2808     wake_up_interruptible(&ev_file->poll_wait);
2809 
2810     mutex_lock(&dev->devx_event_table.event_xa_lock);
2811     /* delete the subscriptions which are related to this FD */
2812     list_for_each_entry_safe(event_sub, event_sub_tmp,
2813                  &ev_file->subscribed_events_list, file_list) {
2814         devx_cleanup_subscription(dev, event_sub);
2815         list_del_rcu(&event_sub->file_list);
2816         /* subscription may not be used by the read API any more */
2817         call_rcu(&event_sub->rcu, devx_free_subscription);
2818     }
2819     mutex_unlock(&dev->devx_event_table.event_xa_lock);
2820 
2821     put_device(&dev->ib_dev.dev);
2822 };
2823 
2824 DECLARE_UVERBS_NAMED_METHOD(
2825     MLX5_IB_METHOD_DEVX_UMEM_REG,
2826     UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2827             MLX5_IB_OBJECT_DEVX_UMEM,
2828             UVERBS_ACCESS_NEW,
2829             UA_MANDATORY),
2830     UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2831                UVERBS_ATTR_TYPE(u64),
2832                UA_MANDATORY),
2833     UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2834                UVERBS_ATTR_TYPE(u64),
2835                UA_MANDATORY),
2836     UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2837                  enum ib_access_flags),
2838     UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2839                  u64),
2840     UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2841                 UVERBS_ATTR_TYPE(u32),
2842                 UA_MANDATORY));
2843 
2844 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2845     MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2846     UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2847             MLX5_IB_OBJECT_DEVX_UMEM,
2848             UVERBS_ACCESS_DESTROY,
2849             UA_MANDATORY));
2850 
2851 DECLARE_UVERBS_NAMED_METHOD(
2852     MLX5_IB_METHOD_DEVX_QUERY_EQN,
2853     UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2854                UVERBS_ATTR_TYPE(u32),
2855                UA_MANDATORY),
2856     UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2857                 UVERBS_ATTR_TYPE(u32),
2858                 UA_MANDATORY));
2859 
2860 DECLARE_UVERBS_NAMED_METHOD(
2861     MLX5_IB_METHOD_DEVX_QUERY_UAR,
2862     UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2863                UVERBS_ATTR_TYPE(u32),
2864                UA_MANDATORY),
2865     UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2866                 UVERBS_ATTR_TYPE(u32),
2867                 UA_MANDATORY));
2868 
2869 DECLARE_UVERBS_NAMED_METHOD(
2870     MLX5_IB_METHOD_DEVX_OTHER,
2871     UVERBS_ATTR_PTR_IN(
2872         MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2873         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2874         UA_MANDATORY,
2875         UA_ALLOC_AND_COPY),
2876     UVERBS_ATTR_PTR_OUT(
2877         MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2878         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2879         UA_MANDATORY));
2880 
2881 DECLARE_UVERBS_NAMED_METHOD(
2882     MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2883     UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2884             MLX5_IB_OBJECT_DEVX_OBJ,
2885             UVERBS_ACCESS_NEW,
2886             UA_MANDATORY),
2887     UVERBS_ATTR_PTR_IN(
2888         MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2889         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2890         UA_MANDATORY,
2891         UA_ALLOC_AND_COPY),
2892     UVERBS_ATTR_PTR_OUT(
2893         MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2894         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2895         UA_MANDATORY));
2896 
2897 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2898     MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2899     UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2900             MLX5_IB_OBJECT_DEVX_OBJ,
2901             UVERBS_ACCESS_DESTROY,
2902             UA_MANDATORY));
2903 
2904 DECLARE_UVERBS_NAMED_METHOD(
2905     MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2906     UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2907             UVERBS_IDR_ANY_OBJECT,
2908             UVERBS_ACCESS_WRITE,
2909             UA_MANDATORY),
2910     UVERBS_ATTR_PTR_IN(
2911         MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2912         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2913         UA_MANDATORY,
2914         UA_ALLOC_AND_COPY),
2915     UVERBS_ATTR_PTR_OUT(
2916         MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2917         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2918         UA_MANDATORY));
2919 
2920 DECLARE_UVERBS_NAMED_METHOD(
2921     MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2922     UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2923             UVERBS_IDR_ANY_OBJECT,
2924             UVERBS_ACCESS_READ,
2925             UA_MANDATORY),
2926     UVERBS_ATTR_PTR_IN(
2927         MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2928         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2929         UA_MANDATORY,
2930         UA_ALLOC_AND_COPY),
2931     UVERBS_ATTR_PTR_OUT(
2932         MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2933         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2934         UA_MANDATORY));
2935 
2936 DECLARE_UVERBS_NAMED_METHOD(
2937     MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2938     UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2939             UVERBS_IDR_ANY_OBJECT,
2940             UVERBS_ACCESS_READ,
2941             UA_MANDATORY),
2942     UVERBS_ATTR_PTR_IN(
2943         MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2944         UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2945         UA_MANDATORY,
2946         UA_ALLOC_AND_COPY),
2947     UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2948         u16, UA_MANDATORY),
2949     UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2950         MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2951         UVERBS_ACCESS_READ,
2952         UA_MANDATORY),
2953     UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2954         UVERBS_ATTR_TYPE(u64),
2955         UA_MANDATORY));
2956 
2957 DECLARE_UVERBS_NAMED_METHOD(
2958     MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
2959     UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
2960         MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2961         UVERBS_ACCESS_READ,
2962         UA_MANDATORY),
2963     UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
2964         MLX5_IB_OBJECT_DEVX_OBJ,
2965         UVERBS_ACCESS_READ,
2966         UA_OPTIONAL),
2967     UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2968         UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
2969         UA_MANDATORY,
2970         UA_ALLOC_AND_COPY),
2971     UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
2972         UVERBS_ATTR_TYPE(u64),
2973         UA_OPTIONAL),
2974     UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
2975         UVERBS_ATTR_TYPE(u32),
2976         UA_OPTIONAL));
2977 
2978 DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
2979                   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
2980                   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
2981                   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
2982                   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
2983 
2984 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
2985                 UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
2986                 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
2987                 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
2988                 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
2989                 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
2990                 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
2991 
2992 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
2993                 UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
2994                 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
2995                 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
2996 
2997 
2998 DECLARE_UVERBS_NAMED_METHOD(
2999     MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
3000     UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
3001             MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3002             UVERBS_ACCESS_NEW,
3003             UA_MANDATORY));
3004 
3005 DECLARE_UVERBS_NAMED_OBJECT(
3006     MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3007     UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
3008                  devx_async_cmd_event_destroy_uobj,
3009                  &devx_async_cmd_event_fops, "[devx_async_cmd]",
3010                  O_RDONLY),
3011     &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
3012 
3013 DECLARE_UVERBS_NAMED_METHOD(
3014     MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
3015     UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
3016             MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3017             UVERBS_ACCESS_NEW,
3018             UA_MANDATORY),
3019     UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
3020             enum mlx5_ib_uapi_devx_create_event_channel_flags,
3021             UA_MANDATORY));
3022 
3023 DECLARE_UVERBS_NAMED_OBJECT(
3024     MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3025     UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
3026                  devx_async_event_destroy_uobj,
3027                  &devx_async_event_fops, "[devx_async_event]",
3028                  O_RDONLY),
3029     &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
3030 
3031 static bool devx_is_supported(struct ib_device *device)
3032 {
3033     struct mlx5_ib_dev *dev = to_mdev(device);
3034 
3035     return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
3036 }
3037 
3038 const struct uapi_definition mlx5_ib_devx_defs[] = {
3039     UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3040         MLX5_IB_OBJECT_DEVX,
3041         UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3042     UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3043         MLX5_IB_OBJECT_DEVX_OBJ,
3044         UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3045     UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3046         MLX5_IB_OBJECT_DEVX_UMEM,
3047         UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3048     UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3049         MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3050         UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3051     UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3052         MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3053         UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3054     {},
3055 };