// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/mlx5/device.h>
#include <linux/mlx5/transobj.h>
#include "aso.h"
#include "wq.h"

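/* ASO (Advanced Steering Operation) offloads stateful operations such as
 * connection tracking, flow metering and MACsec to objects on the device.
 * This file implements the send queue / completion queue pair used to post
 * ASO WQEs and to poll for their completions.
 */
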
struct mlx5_aso_cq {
        /* data path - accessed per cqe */
        struct mlx5_cqwq wq;

        /* data path - accessed per napi poll */
        struct mlx5_core_cq mcq;

        /* control */
        struct mlx5_core_dev *mdev;
        struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5_aso {
        /* data path */
        u16 cc;
        u16 pc;

        struct mlx5_wqe_ctrl_seg *doorbell_cseg;
        struct mlx5_aso_cq cq;

        /* read only */
        struct mlx5_wq_cyc wq;
        void __iomem *uar_map;
        u32 sqn;

        /* control path */
        struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}

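/* Allocate the CQ ring and doorbell record, and mark every CQE as invalid
 * (op_own = 0xf1) so that stale ring entries are not mistaken for real
 * completions on the first pass over the ring.
 */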
static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
                             void *cqc_data, struct mlx5_aso_cq *cq)
{
        struct mlx5_core_cq *mcq = &cq->mcq;
        struct mlx5_wq_param param;
        int err;
        u32 i;

        param.buf_numa_node = numa_node;
        param.db_numa_node = numa_node;

        err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
        if (err)
                return err;

        mcq->cqe_sz = 64;
        mcq->set_ci_db = cq->wq_ctrl.db.db;
        mcq->arm_db = cq->wq_ctrl.db.db + 1;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->mdev = mdev;

        return 0;
}

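/* Issue the CREATE_CQ command: copy the caller's cqc parameters into the
 * command, map the ring pages (PAS list) and the doorbell record, and bind
 * the CQ to EQ vector 0. Completions are polled, so the CQ is never armed.
 */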
static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
{
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];
        struct mlx5_core_dev *mdev = cq->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        void *in, *cqc;
        int inlen, eqn;
        int err;

        err = mlx5_vector2eqn(mdev, 0, &eqn);
        if (err)
                return err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
        MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
        MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                 MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

        kvfree(in);

        return err;
}

static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
{
        mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
        mlx5_wq_destroy(&cq->wq_ctrl);
}

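/* Build a minimal CQC (log_cq_size = 1, i.e. a two-entry CQ), then allocate
 * the ring and create the CQ object. The temporary cqc_data buffer only
 * carries parameters between the two steps and is freed on all paths.
 */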
static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
                              struct mlx5_aso_cq *cq)
{
        void *cqc_data;
        int err;

        cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
        if (!cqc_data)
                return -ENOMEM;

        MLX5_SET(cqc, cqc_data, log_cq_size, 1);
        MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
        if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
                MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);

        err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
        if (err) {
                mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
                goto err_out;
        }

        err = create_aso_cq(cq, cqc_data);
        if (err) {
                mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
                goto err_free_cq;
        }

        kvfree(cqc_data);
        return 0;

err_free_cq:
        mlx5_aso_free_cq(cq);
err_out:
        kvfree(cqc_data);
        return err;
}

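/* Allocate the cyclic SQ ring on the given NUMA node and point wq->db at
 * the send doorbell record within the doorbell page.
 */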
static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
                             void *sqc_data, struct mlx5_aso *sq)
{
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5_wq_param param;
        int err;

        sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;

        param.db_numa_node = numa_node;
        param.buf_numa_node = numa_node;
        err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
        if (err)
                return err;
        wq->db = &wq->db[MLX5_SND_DBR];

        return 0;
}

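/* Issue the CREATE_SQ command with the SQ in RST state: attach it to the
 * ASO CQ, select the cyclic WQ type, and map the ring pages and doorbell
 * record.
 */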
static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
                         void *sqc_data, struct mlx5_aso *sq)
{
        void *in, *sqc, *wq;
        int inlen, err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
        MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);

        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
        MLX5_SET(sqc, sqc, flush_in_error_en, 1);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
        MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
                 MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

        mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}

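/* Move the SQ from RST to RDY via MODIFY_SQ; WQEs may only be posted once
 * the SQ is in the ready state.
 */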
static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
        void *in, *sqc;
        int inlen, err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

        err = mlx5_core_modify_sq(mdev, sqn, in);

        kvfree(in);

        return err;
}

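/* Create the SQ and immediately transition it to RDY, tearing the SQ down
 * again if the state transition fails.
 */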
static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
                                  void *sqc_data, struct mlx5_aso *sq)
{
        int err;

        err = create_aso_sq(mdev, pdn, sqc_data, sq);
        if (err)
                return err;

        err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
        if (err)
                mlx5_core_destroy_sq(mdev, sq->sqn);

        return err;
}

static void mlx5_aso_free_sq(struct mlx5_aso *sq)
{
        mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
{
        mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
        mlx5_aso_free_sq(sq);
}

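/* Build a minimal SQC on the given PD (log_wq_sz = 1, i.e. two WQEBBs of
 * MLX5_SEND_WQE_BB bytes each), then allocate the ring and create the SQ
 * in the RDY state.
 */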
static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
                              u32 pdn, struct mlx5_aso *sq)
{
        void *sqc_data, *wq;
        int err;

        sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
        if (!sqc_data)
                return -ENOMEM;

        wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd, pdn);
        MLX5_SET(wq, wq, log_wq_sz, 1);

        err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
        if (err) {
                mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
                goto err_out;
        }

        err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
        if (err) {
                mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
                goto err_free_asosq;
        }

        mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);

        kvfree(sqc_data);
        return 0;

err_free_asosq:
        mlx5_aso_free_sq(sq);
err_out:
        kvfree(sqc_data);
        return err;
}

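/* Create the ASO SQ/CQ pair on the device's local NUMA node. A typical
 * caller flow looks like the sketch below (illustrative only; "aso", "wqe"
 * and the parameter names are hypothetical caller-side names):
 *
 *      aso = mlx5_aso_create(mdev, pdn);
 *      if (IS_ERR(aso))
 *              return PTR_ERR(aso);
 *      wqe = mlx5_aso_get_wqe(aso);
 *      mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id, opc_mode);
 *      ... fill the remaining WQE segments ...
 *      mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
 *      err = mlx5_aso_poll_cq(aso, false, timeout_ms);
 */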
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
{
        int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
        struct mlx5_aso *aso;
        int err;

        aso = kzalloc(sizeof(*aso), GFP_KERNEL);
        if (!aso)
                return ERR_PTR(-ENOMEM);

        err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
        if (err)
                goto err_cq;

        err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
        if (err)
                goto err_sq;

        return aso;

err_sq:
        mlx5_aso_destroy_cq(&aso->cq);
err_cq:
        kfree(aso);
        return ERR_PTR(err);
}

void mlx5_aso_destroy(struct mlx5_aso *aso)
{
        if (IS_ERR_OR_NULL(aso))
                return;

        mlx5_aso_destroy_sq(aso);
        mlx5_aso_destroy_cq(&aso->cq);
        kfree(aso);
}

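/* Fill the WQE control segment: the ACCESS_ASO opcode combined with the
 * caller's opcode modifier, the current producer counter as WQE index, this
 * SQ's number plus the DS count, and the target ASO object id.
 */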
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
                        struct mlx5_aso_wqe *aso_wqe,
                        u32 obj_id, u32 opc_mode)
{
        struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;

        cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
                                             (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                                             MLX5_OPCODE_ACCESS_ASO);
        cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
        cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        cseg->general_id = cpu_to_be32(obj_id);
}

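/* Return the WQE slot at the current producer position of the cyclic SQ. */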
void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
        u16 pi;

        pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
        return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
}

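/* Ring the doorbell for a previously built WQE: advance the producer
 * counter by the WQE size in WQEBBs, update the doorbell record, then write
 * the control segment to the UAR. The barriers order the WQE contents, the
 * doorbell record and the UAR write against each other.
 */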
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
                       struct mlx5_wqe_ctrl_seg *doorbell_cseg)
{
        doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();

        if (with_data)
                aso->pc += MLX5_ASO_WQEBBS_DATA;
        else
                aso->pc += MLX5_ASO_WQEBBS;
        *aso->wq.db = cpu_to_be32(aso->pc);

        /* ensure doorbell record is visible to device before ringing the
         * doorbell
         */
        wmb();

        mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);

        WRITE_ONCE(doorbell_cseg, NULL);
}

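/* Busy-poll the CQ for a single completion for up to interval_ms
 * milliseconds. On completion, dump any error CQE, release the CQE back to
 * hardware and advance the consumer counter. Returns 0 on success and
 * -ETIMEDOUT if no completion arrived in time.
 */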
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
{
        struct mlx5_aso_cq *cq = &aso->cq;
        struct mlx5_cqe64 *cqe;
        unsigned long expires;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);

        expires = jiffies + msecs_to_jiffies(interval_ms);
        while (!cqe && time_is_after_jiffies(expires)) {
                usleep_range(2, 10);
                cqe = mlx5_cqwq_get_cqe(&cq->wq);
        }

        if (!cqe)
                return -ETIMEDOUT;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        mlx5_cqwq_pop(&cq->wq);

        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                struct mlx5_err_cqe *err_cqe;

                mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
                              get_cqe_opcode(cqe));

                err_cqe = (struct mlx5_err_cqe *)cqe;
                mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
                              err_cqe->vendor_err_synd);
                mlx5_core_err(cq->mdev, "syndrome=%x\n",
                              err_cqe->syndrome);
                print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
                               16, 1, err_cqe,
                               sizeof(*err_cqe), false);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        if (with_data)
                aso->cc += MLX5_ASO_WQEBBS_DATA;
        else
                aso->cc += MLX5_ASO_WQEBBS;

        return 0;
}