/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Description: QPLib resource manager.
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats);

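/* Free the pages tracked by one page-buffer list (PBL) level. Driver-allocated
 * levels release their DMA-coherent pages; umem-backed levels only drop the
 * bookkeeping arrays, since the user pages are owned by the umem.
 */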
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

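/* Record the DMA address of every block of a user memory region in the PBL
 * map array; no kernel pages are allocated for such a level.
 */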
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
		pbl->pg_arr[i] = NULL;
		pbl->pg_count++;
		i++;
	}
}

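/* Allocate one PBL level: DMA-coherent pages for kernel-owned queues, or the
 * DMA addresses of the caller's umem blocks for user-owned queues. A nopte
 * request returns without allocating anything.
 */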
static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;

	pbl->pg_arr = vmalloc(pages * sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}

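/* Tear down a hardware queue: free each PBL level set up by
 * bnxt_qplib_alloc_init_hwq() and reset the queue bookkeeping fields.
 */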
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

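/* Allocate and initialize a hardware queue. Small queues use a single level
 * of PTEs; larger queues add a PBL level and, when needed, a PDE level on
 * top, so the queue may be described by a 1-, 2- or 3-level page table.
 */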
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!hwq_attr->sginfo->umem) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);

		hwq->is_user = true;
		npages = sginfo_num_pages;
		npages = (npages * PAGE_SIZE) /
			  BIT_ULL(hwq_attr->sginfo->pgshft);
		if ((sginfo_num_pages * PAGE_SIZE) %
		     BIT_ULL(hwq_attr->sginfo->pgshft))
			if (!npages)
				npages++;
	}

	if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
		/* This request fits in a single level-0 page of PTEs */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
		goto done;
	}

	if (npages >= MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* Two levels of indirection: PDE -> PBL -> PTE */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;

			/* Allocate the PDE pages (level 0) */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;

			/* Allocate the PBL pages (level 1) */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;

			/* Fill the PDEs with the PBL page addresses */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			if (hwq_attr->type == HWQ_TYPE_MR) {
				/* For an MR, a single contiguous PDE page is
				 * expected to hold all of the PBL entries for
				 * the user supplied memory region.
				 */
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[0][i] = src_phys_ptr[i] |
							     flag;
			} else {
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
						src_phys_ptr[i] |
						PTU_PDE_VALID;
			}

			/* Allocate or map the PTE pages (level 2) */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;

			/* Fill the PBLs with the PTE page addresses */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Mark the last and next-to-last PTEs */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
		} else {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* One level of indirection: PBL -> PTE */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;

			/* Allocate the PBL page (level 0) */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;

			/* Allocate or map the PTE pages (level 1) */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;

			/* Fill the PBL with the PTE page addresses */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Mark the last and next-to-last PTEs */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = depth;
	hwq->element_size = stride;
	hwq->qe_ppg = pg_size / stride;

	/* For nopte queues the top level was not populated, so expose the
	 * level below it to the caller.
	 */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}

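/* Free all firmware context tables (QPC, MRW, SRQC, CQ, TIM and the TQM
 * rings/PDE) along with the statistics context.
 */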
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);

	/* Restore the original PDE level before freeing the TQM PDE table */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

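/* Allocate the TQM PDE table and one backing ring per TQM allocation request
 * with a non-zero queue count.
 */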
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc = 0;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Allocate the PDE table that points at the per-ring page tables */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto out;
	/* Save the PDE level; bnxt_qplib_map_tqm_pgtbl() overwrites it */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto out;
	}
out:
	return rc;
}

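/* Point the TQM PDE entries at each ring's page tables and derive the PDE
 * level from the first ring that was actually allocated.
 */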
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* The PDE table sits one level above the rings, capped at PBL_LVL_2 */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}

static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc = 0;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}

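/*
 * Allocate the host-backed context memory the firmware uses for this
 * function: QPC (QP state), MRW (memory region/window), SRQC (shared receive
 * queue), CQ and TIM tables plus the TQM rings, followed by the statistics
 * context. VFs and P5 devices skip the host-allocated tables and only set up
 * the statistics context.
 */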
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc = 0;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}

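/* SGID table: a host-side shadow of the source GIDs programmed into the
 * device, along with the per-entry hardware IDs and VLAN state.
 */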
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

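/* PD table: a bitmap allocator for protection domain IDs; a set bit means the
 * PD is free.
 */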
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found an unused PD; mark it as allocated */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	/* Mark every PD as free (all bits set) */
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}

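/* DPI (doorbell page index) table: a bitmap of doorbell pages in the BAR plus
 * the application context that owns each allocated page.
 */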
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found an unused DPI; mark it as allocated */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
					  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	dpit->dbr_bar_reg_iomem = NULL;
	return -ENOMEM;
}

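/* Statistics context: a DMA-coherent buffer that the firmware fills with
 * hardware counters.
 */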
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = cctx->hw_stats_size;
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}

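/* Enable PCIe 32-bit and 64-bit AtomicOp requests toward the root complex.
 * Returns non-zero if either capability cannot be enabled or the AtomicOp
 * requester enable bit is not set afterwards.
 */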
int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
	int comp;
	u16 ctl2;

	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP32);
	if (comp)
		return -EOPNOTSUPP;
	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (comp)
		return -EOPNOTSUPP;
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
	return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}