// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"

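/*
 * Size in bytes of the physical address (PAS) list for an SRQ: the total
 * work-queue footprint (entries * stride, shifted by the quantized page
 * offset) rounded up to whole device pages, one u64 PAS entry per page.
 * log_page_size is stored relative to the 4KB adapter page (hence the
 * +12), and page_offset is quantized in units of page_size / 64.
 */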
static int get_pas_size(struct mlx5_srq_attr *in)
{
    u32 log_page_size = in->log_page_size + 12;
    u32 log_srq_size  = in->log_size;
    u32 log_rq_stride = in->wqe_shift;
    u32 page_offset   = in->page_offset;
    u32 po_quanta     = 1 << (log_page_size - 6);
    u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
    u32 page_size     = 1 << log_page_size;
    u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
    u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);

    return rq_num_pas * sizeof(u64);
}

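/*
 * set_wq()/set_srqc() write the same SRQ attributes into the two hardware
 * context layouts. Note the stride difference: the wq layout takes
 * log_wq_stride as the log2 of the stride in bytes (wqe_shift + 4),
 * while srqc takes log_rq_stride relative to the 16-byte basic stride.
 */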
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
    MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
             & MLX5_SRQ_FLAG_WQ_SIG));
    MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
    MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
    MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
    MLX5_SET(wq,   wq, page_offset,   in->page_offset);
    MLX5_SET(wq,   wq, lwm,           in->lwm);
    MLX5_SET(wq,   wq, pd,            in->pd);
    MLX5_SET64(wq, wq, dbr_addr,      in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
    MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
             & MLX5_SRQ_FLAG_WQ_SIG));
    MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
    MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
    MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
    MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
    MLX5_SET(srqc,   srqc, lwm,           in->lwm);
    MLX5_SET(srqc,   srqc, pd,            in->pd);
    MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
    MLX5_SET(srqc,   srqc, xrcd,          in->xrcd);
    MLX5_SET(srqc,   srqc, cqn,           in->cqn);
}

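/*
 * get_wq()/get_srqc() are the inverses of the setters above: they decode
 * a queried hardware context back into a mlx5_srq_attr, OR-ing in the WQ
 * signature flag when the device reports it.
 */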
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
    if (MLX5_GET(wq, wq, wq_signature))
        in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
    in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
    in->wqe_shift     = MLX5_GET(wq,   wq, log_wq_stride) - 4;
    in->log_size      = MLX5_GET(wq,   wq, log_wq_sz);
    in->page_offset   = MLX5_GET(wq,   wq, page_offset);
    in->lwm           = MLX5_GET(wq,   wq, lwm);
    in->pd            = MLX5_GET(wq,   wq, pd);
    in->db_record     = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
    if (MLX5_GET(srqc, srqc, wq_signature))
        in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
    in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
    in->wqe_shift     = MLX5_GET(srqc,   srqc, log_rq_stride);
    in->log_size      = MLX5_GET(srqc,   srqc, log_srq_size);
    in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
    in->lwm           = MLX5_GET(srqc,   srqc, lwm);
    in->pd            = MLX5_GET(srqc,   srqc, pd);
    in->db_record     = MLX5_GET64(srqc, srqc, dbr_addr);
}

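/*
 * Look up an SRQ by number and take a reference on it under the xarray
 * lock, so the entry cannot be freed while the caller uses it.
 */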
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
    struct mlx5_srq_table *table = &dev->srq_table;
    struct mlx5_core_srq *srq;

    xa_lock_irq(&table->array);
    srq = xa_load(&table->array, srqn);
    if (srq)
        refcount_inc(&srq->common.refcount);
    xa_unlock_irq(&table->array);

    return srq;
}

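/*
 * Record the chosen page size in the attributes and sanity-check that it
 * yields exactly the PAS array length get_pas_size() will compute for the
 * command payload.
 */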
static int __set_srq_page_size(struct mlx5_srq_attr *in,
                   unsigned long page_size)
{
    if (!page_size)
        return -EINVAL;
    in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;

    if (WARN_ON(get_pas_size(in) !=
            ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))
        return -EINVAL;
    return 0;
}

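/*
 * set_srq_page_size() must be a macro: typ and log_pgsz_fld are
 * struct/field tokens consumed by mlx5_umem_find_best_quantized_pgoff()
 * to pick the best page size for the umem and to derive the page offset,
 * quantized in units of page_size / 64.
 */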
#define set_srq_page_size(in, typ, log_pgsz_fld)                               \
    __set_srq_page_size(in, mlx5_umem_find_best_quantized_pgoff(           \
                    (in)->umem, typ, log_pgsz_fld,         \
                    MLX5_ADAPTER_PAGE_SHIFT, page_offset,  \
                    64, &(in)->page_offset))

static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
              struct mlx5_srq_attr *in)
{
    u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
    void *create_in;
    void *srqc;
    void *pas;
    int pas_size;
    int inlen;
    int err;

    if (in->umem) {
        err = set_srq_page_size(in, srqc, log_page_size);
        if (err)
            return err;
    }

    pas_size  = get_pas_size(in);
    inlen     = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
    create_in = kvzalloc(inlen, GFP_KERNEL);
    if (!create_in)
        return -ENOMEM;

    MLX5_SET(create_srq_in, create_in, uid, in->uid);
    srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
    pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

    set_srqc(srqc, in);
    if (in->umem)
        mlx5_ib_populate_pas(
            in->umem,
            1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
            pas, 0);
    else
        memcpy(pas, in->pas, pas_size);

    MLX5_SET(create_srq_in, create_in, opcode,
         MLX5_CMD_OP_CREATE_SRQ);

    err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
                sizeof(create_out));
    kvfree(create_in);
    if (!err) {
        srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
        srq->uid = in->uid;
    }

    return err;
}

static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
    u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};

    MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
    MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
    MLX5_SET(destroy_srq_in, in, uid, srq->uid);

    return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
}

static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
               u16 lwm, int is_srq)
{
    u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

    MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
    MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
    MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
    MLX5_SET(arm_rq_in, in, lwm, lwm);
    MLX5_SET(arm_rq_in, in, uid, srq->uid);

    return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
             struct mlx5_srq_attr *out)
{
    u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
    u32 *srq_out;
    void *srqc;
    int err;

    srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
    if (!srq_out)
        return -ENOMEM;

    MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
    MLX5_SET(query_srq_in, in, srqn, srq->srqn);
    err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
    if (err)
        goto out;

    srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
    get_srqc(srqc, out);
    if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
        out->flags |= MLX5_SRQ_FLAG_ERR;
out:
    kvfree(srq_out);
    return err;
}

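/*
 * The XRC SRQ variants mirror the plain SRQ commands but target the XRC
 * SRQ object and additionally carry the user_index.
 */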
static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
                  struct mlx5_core_srq *srq,
                  struct mlx5_srq_attr *in)
{
    u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {};
    void *create_in;
    void *xrc_srqc;
    void *pas;
    int pas_size;
    int inlen;
    int err;

    if (in->umem) {
        err = set_srq_page_size(in, xrc_srqc, log_page_size);
        if (err)
            return err;
    }

    pas_size  = get_pas_size(in);
    inlen     = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
    create_in = kvzalloc(inlen, GFP_KERNEL);
    if (!create_in)
        return -ENOMEM;

    MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
    xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
                xrc_srq_context_entry);
    pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

    set_srqc(xrc_srqc, in);
    MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
    if (in->umem)
        mlx5_ib_populate_pas(
            in->umem,
            1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
            pas, 0);
    else
        memcpy(pas, in->pas, pas_size);
    MLX5_SET(create_xrc_srq_in, create_in, opcode,
         MLX5_CMD_OP_CREATE_XRC_SRQ);

    err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
                sizeof(create_out));
    if (err)
        goto out;

    srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
    srq->uid = in->uid;
out:
    kvfree(create_in);
    return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
                   struct mlx5_core_srq *srq)
{
    u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};

    MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
    MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
    MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);

    return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
               u16 lwm)
{
    u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};

    MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
    MLX5_SET(arm_xrc_srq_in, in, op_mod,
         MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
    MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
    MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
    MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);

    return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
                 struct mlx5_core_srq *srq,
                 struct mlx5_srq_attr *out)
{
    u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
    u32 *xrcsrq_out;
    void *xrc_srqc;
    int err;

    xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
    if (!xrcsrq_out)
        return -ENOMEM;

    MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
    MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);

    err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
    if (err)
        goto out;

    xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
                xrc_srq_context_entry);
    get_srqc(xrc_srqc, out);
    if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
        out->flags |= MLX5_SRQ_FLAG_ERR;

out:
    kvfree(xrcsrq_out);
    return err;
}

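/*
 * RMP (receive memory pool) contexts embed a generic wq context, so the
 * create path uses set_wq()/log_wq_pg_sz rather than the srqc layout,
 * and the RMP is created directly in the ready state.
 */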
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
              struct mlx5_srq_attr *in)
{
    void *create_out = NULL;
    void *create_in = NULL;
    void *rmpc;
    void *wq;
    void *pas;
    int pas_size;
    int outlen;
    int inlen;
    int err;

    if (in->umem) {
        err = set_srq_page_size(in, wq, log_wq_pg_sz);
        if (err)
            return err;
    }

    pas_size = get_pas_size(in);
    inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
    outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
    create_in = kvzalloc(inlen, GFP_KERNEL);
    create_out = kvzalloc(outlen, GFP_KERNEL);
    if (!create_in || !create_out) {
        err = -ENOMEM;
        goto out;
    }

    rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
    wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

    MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
    MLX5_SET(create_rmp_in, create_in, uid, in->uid);
    pas = MLX5_ADDR_OF(rmpc, rmpc, wq.pas);

    set_wq(wq, in);
    if (in->umem)
        mlx5_ib_populate_pas(
            in->umem,
            1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
            pas, 0);
    else
        memcpy(pas, in->pas, pas_size);

    MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
    err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
    if (!err) {
        srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
        srq->uid = in->uid;
    }

out:
    kvfree(create_in);
    kvfree(create_out);
    return err;
}

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
    u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};

    MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
    MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
    MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
    return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}

static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
               u16 lwm)
{
    void *out = NULL;
    void *in = NULL;
    void *rmpc;
    void *wq;
    void *bitmask;
    int outlen;
    int inlen;
    int err;

    inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
    outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);

    in = kvzalloc(inlen, GFP_KERNEL);
    out = kvzalloc(outlen, GFP_KERNEL);
    if (!in || !out) {
        err = -ENOMEM;
        goto out;
    }

    rmpc    = MLX5_ADDR_OF(modify_rmp_in, in,   ctx);
    bitmask = MLX5_ADDR_OF(modify_rmp_in, in,   bitmask);
    wq      = MLX5_ADDR_OF(rmpc,          rmpc, wq);

    MLX5_SET(modify_rmp_in, in,      rmp_state, MLX5_RMPC_STATE_RDY);
    MLX5_SET(modify_rmp_in, in,      rmpn,      srq->srqn);
    MLX5_SET(modify_rmp_in, in,      uid,       srq->uid);
    MLX5_SET(wq,            wq,      lwm,       lwm);
    MLX5_SET(rmp_bitmask,   bitmask, lwm,       1);
    MLX5_SET(rmpc,          rmpc,    state,     MLX5_RMPC_STATE_RDY);
    MLX5_SET(modify_rmp_in, in,      opcode,    MLX5_CMD_OP_MODIFY_RMP);

    err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);

out:
    kvfree(in);
    kvfree(out);
    return err;
}

static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
             struct mlx5_srq_attr *out)
{
    u32 *rmp_out = NULL;
    u32 *rmp_in = NULL;
    void *rmpc;
    int outlen;
    int inlen;
    int err;

    outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
    inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

    rmp_out = kvzalloc(outlen, GFP_KERNEL);
    rmp_in = kvzalloc(inlen, GFP_KERNEL);
    if (!rmp_out || !rmp_in) {
        err = -ENOMEM;
        goto out;
    }

    MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
    MLX5_SET(query_rmp_in, rmp_in, rmpn,   srq->srqn);
    err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
    if (err)
        goto out;

    rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
    get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
    if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
        out->flags |= MLX5_SRQ_FLAG_ERR;

out:
    kvfree(rmp_out);
    kvfree(rmp_in);
    return err;
}

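/*
 * XRQ contexts also embed a wq. For tag-matching SRQs (IB_SRQT_TM) the
 * create command additionally programs the tag-matching topology and,
 * optionally, the rendezvous offload.
 */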
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
              struct mlx5_srq_attr *in)
{
    u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
    void *create_in;
    void *xrqc;
    void *wq;
    void *pas;
    int pas_size;
    int inlen;
    int err;

    if (in->umem) {
        err = set_srq_page_size(in, wq, log_wq_pg_sz);
        if (err)
            return err;
    }

    pas_size = get_pas_size(in);
    inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
    create_in = kvzalloc(inlen, GFP_KERNEL);
    if (!create_in)
        return -ENOMEM;

    xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
    wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
    pas = MLX5_ADDR_OF(xrqc, xrqc, wq.pas);

    set_wq(wq, in);
    if (in->umem)
        mlx5_ib_populate_pas(
            in->umem,
            1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
            pas, 0);
    else
        memcpy(pas, in->pas, pas_size);

    if (in->type == IB_SRQT_TM) {
        MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
        if (in->flags & MLX5_SRQ_FLAG_RNDV)
            MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
        MLX5_SET(xrqc, xrqc,
             tag_matching_topology_context.log_matching_list_sz,
             in->tm_log_list_size);
    }
    MLX5_SET(xrqc, xrqc, user_index, in->user_index);
    MLX5_SET(xrqc, xrqc, cqn, in->cqn);
    MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
    MLX5_SET(create_xrq_in, create_in, uid, in->uid);
    err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
                sizeof(create_out));
    kvfree(create_in);
    if (!err) {
        srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
        srq->uid = in->uid;
    }

    return err;
}

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
    u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};

    MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
    MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
    MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

    return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}

static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
               struct mlx5_core_srq *srq,
               u16 lwm)
{
    u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

    MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
    MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
    MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
    MLX5_SET(arm_rq_in, in, lwm, lwm);
    MLX5_SET(arm_rq_in, in, uid, srq->uid);

    return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
             struct mlx5_srq_attr *out)
{
    u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
    u32 *xrq_out;
    int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
    void *xrqc;
    int err;

    xrq_out = kvzalloc(outlen, GFP_KERNEL);
    if (!xrq_out)
        return -ENOMEM;

    MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
    MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

    err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
    if (err)
        goto out;

    xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
    get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
    if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
        out->flags |= MLX5_SRQ_FLAG_ERR;
    out->tm_next_tag =
        MLX5_GET(xrqc, xrqc,
             tag_matching_topology_context.append_next_index);
    out->tm_hw_phase_cnt =
        MLX5_GET(xrqc, xrqc,
             tag_matching_topology_context.hw_phase_cnt);
    out->tm_sw_phase_cnt =
        MLX5_GET(xrqc, xrqc,
             tag_matching_topology_context.sw_phase_cnt);

out:
    kvfree(xrq_out);
    return err;
}

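/*
 * Command dispatch: with ISSI 0 the firmware uses the legacy SRQ
 * commands; with ISSI > 0 an SRQ is backed by an XRC SRQ, XRQ or RMP
 * object depending on the resource type chosen at creation.
 */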
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                struct mlx5_srq_attr *in)
{
    if (!dev->mdev->issi)
        return create_srq_cmd(dev, srq, in);
    switch (srq->common.res) {
    case MLX5_RES_XSRQ:
        return create_xrc_srq_cmd(dev, srq, in);
    case MLX5_RES_XRQ:
        return create_xrq_cmd(dev, srq, in);
    default:
        return create_rmp_cmd(dev, srq, in);
    }
}

static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
    if (!dev->mdev->issi)
        return destroy_srq_cmd(dev, srq);
    switch (srq->common.res) {
    case MLX5_RES_XSRQ:
        return destroy_xrc_srq_cmd(dev, srq);
    case MLX5_RES_XRQ:
        return destroy_xrq_cmd(dev, srq);
    default:
        return destroy_rmp_cmd(dev, srq);
    }
}

int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
            struct mlx5_srq_attr *in)
{
    struct mlx5_srq_table *table = &dev->srq_table;
    int err;

    switch (in->type) {
    case IB_SRQT_XRC:
        srq->common.res = MLX5_RES_XSRQ;
        break;
    case IB_SRQT_TM:
        srq->common.res = MLX5_RES_XRQ;
        break;
    default:
        srq->common.res = MLX5_RES_SRQ;
    }

    err = create_srq_split(dev, srq, in);
    if (err)
        return err;

    refcount_set(&srq->common.refcount, 1);
    init_completion(&srq->common.free);

    err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
    if (err)
        goto err_destroy_srq_split;

    return 0;

err_destroy_srq_split:
    destroy_srq_split(dev, srq);

    return err;
}

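/*
 * Destroy protocol: first replace the xarray entry with XA_ZERO_ENTRY so
 * lookups fail but the index stays reserved, then issue the firmware
 * destroy. On failure the original entry is restored; on success the
 * index is erased and we wait for all references to drop.
 */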
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
    struct mlx5_srq_table *table = &dev->srq_table;
    struct mlx5_core_srq *tmp;
    int err;

    /* Delete entry, but leave index occupied */
    tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
    if (WARN_ON(tmp != srq))
        return xa_err(tmp) ?: -EINVAL;

    err = destroy_srq_split(dev, srq);
    if (err) {
        /*
         * We don't need to check the returned result for an error,
         * because we are storing the entry in pre-allocated xarray
         * space and it can't fail at this stage.
         */
        xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
        return err;
    }
    xa_erase_irq(&table->array, srq->srqn);

    mlx5_core_res_put(&srq->common);
    wait_for_completion(&srq->common.free);
    return 0;
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
               struct mlx5_srq_attr *out)
{
    if (!dev->mdev->issi)
        return query_srq_cmd(dev, srq, out);
    switch (srq->common.res) {
    case MLX5_RES_XSRQ:
        return query_xrc_srq_cmd(dev, srq, out);
    case MLX5_RES_XRQ:
        return query_xrq_cmd(dev, srq, out);
    default:
        return query_rmp_cmd(dev, srq, out);
    }
}

int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
             u16 lwm, int is_srq)
{
    if (!dev->mdev->issi)
        return arm_srq_cmd(dev, srq, lwm, is_srq);
    switch (srq->common.res) {
    case MLX5_RES_XSRQ:
        return arm_xrc_srq_cmd(dev, srq, lwm);
    case MLX5_RES_XRQ:
        return arm_xrq_cmd(dev, srq, lwm);
    default:
        return arm_rmp_cmd(dev, srq, lwm);
    }
}

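/*
 * EQ notifier: decode the 24-bit SRQ number from the event queue entry,
 * take a reference so the SRQ cannot vanish, and forward the event to
 * the per-SRQ handler.
 */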
static int srq_event_notifier(struct notifier_block *nb,
                  unsigned long type, void *data)
{
    struct mlx5_srq_table *table;
    struct mlx5_core_srq *srq;
    struct mlx5_eqe *eqe;
    u32 srqn;

    if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
        type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
        return NOTIFY_DONE;

    table = container_of(nb, struct mlx5_srq_table, nb);

    eqe = data;
    srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

    xa_lock(&table->array);
    srq = xa_load(&table->array, srqn);
    if (srq)
        refcount_inc(&srq->common.refcount);
    xa_unlock(&table->array);

    if (!srq)
        return NOTIFY_OK;

    srq->event(srq, eqe->type);

    mlx5_core_res_put(&srq->common);

    return NOTIFY_OK;
}

int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
    struct mlx5_srq_table *table = &dev->srq_table;

    memset(table, 0, sizeof(*table));
    xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);

    table->nb.notifier_call = srq_event_notifier;
    mlx5_notifier_register(dev->mdev, &table->nb);

    return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
    struct mlx5_srq_table *table = &dev->srq_table;

    mlx5_notifier_unregister(dev->mdev, &table->nb);
}