0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <linux/mempool.h>
0025 #include <linux/slab.h>
0026 #include <linux/pci.h>
0027 #include <linux/interrupt.h>
0028
0029 #include <scsi/scsi.h>
0030 #include <scsi/scsi_device.h>
0031 #include <scsi/scsi_transport_fc.h>
0032 #include <scsi/fc/fc_fs.h>
0033
0034 #include "lpfc_hw4.h"
0035 #include "lpfc_hw.h"
0036 #include "lpfc_sli.h"
0037 #include "lpfc_sli4.h"
0038 #include "lpfc_nl.h"
0039 #include "lpfc_disc.h"
0040 #include "lpfc.h"
0041 #include "lpfc_scsi.h"
0042 #include "lpfc_crtn.h"
0043 #include "lpfc_logmsg.h"
0044
0045 #define LPFC_MBUF_POOL_SIZE 64
0046 #define LPFC_MEM_POOL_SIZE 64
0047 #define LPFC_DEVICE_DATA_POOL_SIZE 64
0048 #define LPFC_RRQ_POOL_SIZE 256
0049 #define LPFC_MBX_POOL_SIZE 256
0050
0051 int
0052 lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
0053 size_t bytes;
0054 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
0055
0056 if (max_xri <= 0)
0057 return -ENOMEM;
0058 bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
0059 sizeof(unsigned long);
0060 phba->cfg_rrq_xri_bitmap_sz = bytes;
0061 phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
0062 bytes);
0063 if (!phba->active_rrq_pool)
0064 return -ENOMEM;
0065 else
0066 return 0;
0067 }
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
/**
 * lpfc_mem_alloc - create and allocate all driver memory/DMA pools
 * @phba: HBA to allocate pools for
 * @align: byte alignment passed to dma_pool_create() for the DMA pools
 *
 * Creates the lpfc_mbuf_pool DMA pool and pre-fills a "safety" reserve of
 * LPFC_MBUF_POOL_SIZE mbufs, creates the mailbox and nodelist mempools,
 * then the SLI-rev-specific receive pools: rrq mempool + hrb/drb DMA pools
 * for SLI4, a single hbq DMA pool otherwise. When cfg_EnableXLane is set,
 * also creates the device-data mempool.
 *
 * Returns 0 on success, -ENOMEM on failure; on failure, everything
 * allocated so far is unwound via the goto ladder at the bottom (order is
 * the reverse of allocation, so each label frees exactly what was live at
 * the point that jumps to it).
 */
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	/* DMA pool for buffer pointer lists / mailbox payloads */
	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
					       LPFC_BPL_SIZE,
					       align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail;

	/* Bookkeeping array for the pre-allocated safety mbufs */
	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	/* Pre-allocate the safety-pool mbufs; counts track how many exist
	 * so the unwind path (and lpfc_mem_free) can free exactly those.
	 */
	for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
				       GFP_KERNEL, &pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	/* Mempool for mailbox command structures */
	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	/* Mempool for discovery nodelist entries */
	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							 sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI4: RRQ mempool plus split header/data receive pools */
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE,
						    sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		/* SLI3: single HBQ buffer pool; hrb/drb unused */
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
			LPFC_DEVICE_DATA_POOL_SIZE,
			sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
	/* Unwind in reverse allocation order; NULL the pointers so a later
	 * lpfc_mem_free() does not double-free.
	 */
 fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
 fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
 fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
	/* i holds the number of safety mbufs successfully allocated */
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
						 pool->elements[i].phys);
	kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
 fail:
	return -ENOMEM;
}
0189
0190 int
0191 lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
0192 {
0193 phba->lpfc_nvmet_drb_pool =
0194 dma_pool_create("lpfc_nvmet_drb_pool",
0195 &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
0196 SGL_ALIGN_SZ, 0);
0197 if (!phba->lpfc_nvmet_drb_pool) {
0198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0199 "6024 Can't enable NVME Target - no memory\n");
0200 return -ENOMEM;
0201 }
0202 return 0;
0203 }
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
/**
 * lpfc_mem_free - free all the driver memory/DMA pools
 * @phba: HBA to free pools for
 *
 * Frees everything lpfc_mem_alloc() (and the NVMET/active-RRQ helpers)
 * created: outstanding HBQ buffers first, then the DMA pools, mempools,
 * the mbuf safety pool, and any queued device-data (LUN) entries.
 * Pointers are NULLed after destruction so a repeated call is harmless
 * (dma_pool_destroy/mempool_destroy also tolerate NULL).
 *
 * NOTE(review): callers are presumably responsible for returning all
 * in-flight buffers before this runs — confirm against call sites.
 */
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	/* HBQ buffers must be returned before their backing pools die */
	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	/* Free the discovery nodelist and active-RRQ mempools */
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	/* Free the mailbox command mempool */
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	/* Return remaining safety-pool mbufs, then destroy the mbuf pool */
	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	/* Drain the LUN list back into the device-data mempool, then
	 * destroy it (only ever created when cfg_EnableXLane was set).
	 */
	if (phba->device_data_mem_pool) {
		/* Ensure all objects are returned to their mempool */
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data, phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
	return;
}
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
/**
 * lpfc_mem_free_all - free all memory pools plus all queued mailbox commands
 * @phba: HBA to free memory for
 *
 * Drains the pending and completed mailbox queues (freeing each command's
 * attached DMA buffer), cancels the active mailbox command, then calls
 * lpfc_mem_free() for the pool teardown and finally frees the remaining
 * per-HBA allocations (SG/cmd-rsp DMA pools, congestion info buffer,
 * rxtable, iocbq lookup array).
 */
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf *mp;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Same for the already-completed mailbox queue */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Clear the active-mailbox flag under hbalock, then free the
	 * in-flight command (no completion will run for it after this).
	 */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free DMA buffer memory pool */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	/* Free Congestion Data buffer */
	if (phba->cgn_i) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(struct lpfc_cgn_info),
				  phba->cgn_i->virt, phba->cgn_i->phys);
		kfree(phba->cgn_i);
		phba->cgn_i = NULL;
	}

	/* Free RX table */
	kfree(phba->rxtable);
	phba->rxtable = NULL;

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;

	return;
}
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374
0375
0376 void *
0377 lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
0378 {
0379 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
0380 unsigned long iflags;
0381 void *ret;
0382
0383 ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
0384
0385 spin_lock_irqsave(&phba->hbalock, iflags);
0386 if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
0387 pool->current_count--;
0388 ret = pool->elements[pool->current_count].virt;
0389 *handle = pool->elements[pool->current_count].phys;
0390 }
0391 spin_unlock_irqrestore(&phba->hbalock, iflags);
0392 return ret;
0393 }
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409 void
0410 __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
0411 {
0412 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
0413
0414 if (pool->current_count < pool->max_count) {
0415 pool->elements[pool->current_count].virt = virt;
0416 pool->elements[pool->current_count].phys = dma;
0417 pool->current_count++;
0418 } else {
0419 dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
0420 }
0421 return;
0422 }
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437 void
0438 lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
0439 {
0440 unsigned long iflags;
0441
0442 spin_lock_irqsave(&phba->hbalock, iflags);
0443 __lpfc_mbuf_free(phba, virt, dma);
0444 spin_unlock_irqrestore(&phba->hbalock, iflags);
0445 return;
0446 }
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462 void *
0463 lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
0464 {
0465 void *ret;
0466
0467 ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
0468 return ret;
0469 }
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
/**
 * lpfc_nvmet_buf_free - return an NVMET buffer to lpfc_sg_dma_buf_pool
 * @phba: HBA the buffer was allocated from
 * @virt: virtual address of the buffer (from lpfc_nvmet_buf_alloc)
 * @dma: DMA address of the buffer
 */
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499 struct hbq_dmabuf *
0500 lpfc_els_hbq_alloc(struct lpfc_hba *phba)
0501 {
0502 struct hbq_dmabuf *hbqbp;
0503
0504 hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
0505 if (!hbqbp)
0506 return NULL;
0507
0508 hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
0509 &hbqbp->dbuf.phys);
0510 if (!hbqbp->dbuf.virt) {
0511 kfree(hbqbp);
0512 return NULL;
0513 }
0514 hbqbp->total_size = LPFC_BPL_SIZE;
0515 return hbqbp;
0516 }
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530 void
0531 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
0532 {
0533 dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
0534 kfree(hbqbp);
0535 return;
0536 }
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551 struct hbq_dmabuf *
0552 lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
0553 {
0554 struct hbq_dmabuf *dma_buf;
0555
0556 dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
0557 if (!dma_buf)
0558 return NULL;
0559
0560 dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
0561 &dma_buf->hbuf.phys);
0562 if (!dma_buf->hbuf.virt) {
0563 kfree(dma_buf);
0564 return NULL;
0565 }
0566 dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
0567 &dma_buf->dbuf.phys);
0568 if (!dma_buf->dbuf.virt) {
0569 dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
0570 dma_buf->hbuf.phys);
0571 kfree(dma_buf);
0572 return NULL;
0573 }
0574 dma_buf->total_size = LPFC_DATA_BUF_SIZE;
0575 return dma_buf;
0576 }
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586
0587
0588
0589
0590 void
0591 lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
0592 {
0593 dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
0594 dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
0595 kfree(dmab);
0596 }
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609 struct rqb_dmabuf *
0610 lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
0611 {
0612 struct rqb_dmabuf *dma_buf;
0613
0614 dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
0615 if (!dma_buf)
0616 return NULL;
0617
0618 dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
0619 &dma_buf->hbuf.phys);
0620 if (!dma_buf->hbuf.virt) {
0621 kfree(dma_buf);
0622 return NULL;
0623 }
0624 dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
0625 GFP_KERNEL, &dma_buf->dbuf.phys);
0626 if (!dma_buf->dbuf.virt) {
0627 dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
0628 dma_buf->hbuf.phys);
0629 kfree(dma_buf);
0630 return NULL;
0631 }
0632 dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
0633 return dma_buf;
0634 }
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648 void
0649 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
0650 {
0651 dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
0652 dma_pool_free(phba->lpfc_nvmet_drb_pool,
0653 dmab->dbuf.virt, dmab->dbuf.phys);
0654 kfree(dmab);
0655 }
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
/**
 * lpfc_in_buf_free - free an incoming (receive) buffer
 * @phba: HBA the buffer came from
 * @mp: buffer to free (may be NULL; treated as a no-op)
 *
 * For HBQ-enabled SLI3 operation the buffer is part of an hbq_dmabuf
 * and is unlinked and returned under hbalock; otherwise it is a plain
 * mbuf and is freed directly.
 */
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* If HBQs are torn down (hbq_in_use cleared), the buffer
		 * has already been reclaimed elsewhere — do not touch it.
		 */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		/* tag == -1 appears to mark a buffer never posted to an
		 * HBQ slot, so it is released via the ELS HBQ free hook
		 * rather than re-posted — NOTE(review): confirm tag
		 * semantics against lpfc_sli.c.
		 */
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return;
}
0700
0701
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
/**
 * lpfc_rq_buf_free - recycle an SLI4 receive-queue buffer
 * @phba: HBA the buffer came from
 * @mp: header dmabuf embedded in the rqb_dmabuf to recycle (may be NULL)
 *
 * Rather than freeing the buffer, attempts to re-post it to its
 * header/data receive queue pair so the hardware can reuse it. On a
 * failed post the buffer is released through the queue's
 * rqb_free_buffer hook; on success it is re-linked onto the queue's
 * buffer list. All list/queue manipulation happens under hbalock.
 */
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	/* Build header/data RQ entries from the buffer's DMA addresses */
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		/* Could not re-post: log queue state and free the buffer */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
	} else {
		/* Re-posted successfully; track it on the buffer list */
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}