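/*
 * Chelsio FCoE driver (csiostor): module initialization, PCI probe/remove
 * and AER error handling, debugfs setup, DMA queue configuration and
 * lnode (SCSI host) management.
 */
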
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/export.h>

#include "csio_init.h"
#include "csio_defs.h"

#define CSIO_MIN_MEMPOOL_SZ	64

static struct dentry *csio_debugfs_root;

static struct scsi_transport_template *csio_fcoe_transport;
static struct scsi_transport_template *csio_fcoe_transport_vport;
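
/*
 * csio_mem_read - debugfs read handler for the adapter memory files.
 *
 * The low two bits of file->private_data encode which memory (EDC0,
 * EDC1 or MC) is being read; the remaining bits are the csio_hw pointer.
 * Data is read 64 bytes at a time through the chip ops and copied out
 * to userspace.
 */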
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file_inode(file)->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct csio_hw *hw = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
							 data, NULL);
		else
			ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
							  data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations csio_mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = simple_open,
	.read    = csio_mem_read,
	.llseek  = default_llseek,
};
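
/*
 * csio_add_debugfs_mem - Add a debugfs file exposing @size_mb MB of
 * adapter memory. @idx identifies the memory and is folded into the
 * low bits of the private_data pointer handed to csio_mem_read().
 */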
void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
			  unsigned int idx, unsigned int size_mb)
{
	debugfs_create_file_size(name, S_IRUSR, hw->debugfs_root,
				 (void *)hw + idx, &csio_mem_debugfs_fops,
				 size_mb << 20);
}
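
/* Create debugfs files for the adapter memories that are enabled. */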
static int csio_setup_debugfs(struct csio_hw *hw)
{
	int i;

	if (IS_ERR_OR_NULL(hw->debugfs_root))
		return -1;

	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
	if (i & EDRAM0_ENABLE_F)
		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE_F)
		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);

	hw->chip_ops->chip_dfs_create_ext_mem(hw);
	return 0;
}
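
/*
 * csio_dfs_create - Creates and sets up per-hw debugfs.
 */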
static int
csio_dfs_create(struct csio_hw *hw)
{
	if (csio_debugfs_root) {
		hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
						      csio_debugfs_root);
		csio_setup_debugfs(hw);
	}

	return 0;
}
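
/*
 * csio_dfs_destroy - Destroys per-hw debugfs.
 */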
static void
csio_dfs_destroy(struct csio_hw *hw)
{
	debugfs_remove_recursive(hw->debugfs_root);
}
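
/*
 * csio_dfs_init - Debugfs initialization for the module.
 */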
static void
csio_dfs_init(void)
{
	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
}
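
/*
 * csio_dfs_exit - Debugfs cleanup for the module.
 */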
static void
csio_dfs_exit(void)
{
	debugfs_remove(csio_debugfs_root);
}
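
/*
 * csio_pci_init - PCI initialization.
 * @pdev: PCI device.
 * @bars: Bitmask of BARs to be requested.
 *
 * Enables the PCI device, requests the memory regions, sets bus
 * mastering and configures a 64-bit DMA mask, falling back to 32-bit.
 */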
static int
csio_pci_init(struct pci_dev *pdev, int *bars)
{
	int rv = -ENODEV;

	*bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_enable_device_mem(pdev))
		goto err;

	if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
		goto err_disable_device;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rv)
		rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rv) {
		rv = -ENODEV;
		dev_err(&pdev->dev, "No suitable DMA available.\n");
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_selected_regions(pdev, *bars);
err_disable_device:
	pci_disable_device(pdev);
err:
	return rv;
}
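
/*
 * csio_pci_exit - PCI teardown.
 * @pdev: PCI device.
 * @bars: Bitmask of BARs to be released.
 */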
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
	pci_release_selected_regions(pdev, *bars);
	pci_disable_device(pdev);
}
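
/* Set up / tear down the HW module's event-queue worker. */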
static void
csio_hw_init_workers(struct csio_hw *hw)
{
	INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}

static void
csio_hw_exit_workers(struct csio_hw *hw)
{
	cancel_work_sync(&hw->evtq_work);
}
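
/*
 * csio_create_queues - Create the queues (in firmware) that were
 * previously allocated: the forward-interrupt IQ (when not in MSI-X
 * mode), the FW event IQ, the management EQ and the per-port, per-CPU
 * SCSI IQ/EQ pairs.
 */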
static int
csio_create_queues(struct csio_hw *hw)
{
	int i, j;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
		return 0;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
				       0, hw->pport[0].portid, false, NULL);
		if (rv != 0) {
			csio_err(hw, "Forward Interrupt IQ failed!: %d\n", rv);
			return rv;
		}
	}

	/* FW event queue */
	rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
			       csio_get_fwevt_intr_idx(hw),
			       hw->pport[0].portid, true, NULL);
	if (rv != 0) {
		csio_err(hw, "FW event IQ config failed!: %d\n", rv);
		return rv;
	}

	/* Create mgmt queue */
	rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
			       mgmtm->iq_idx, hw->pport[0].portid, NULL);
	if (rv != 0) {
		csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
		goto err;
	}

	/* Create SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < info->max_cpus; j++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];

			rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
					       sqset->intr_idx, i, false, NULL);
			if (rv != 0) {
				csio_err(hw,
				    "SCSI module IQ config failed [%d][%d]:%d\n",
				    i, j, rv);
				goto err;
			}
			rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
					       sqset->iq_idx, i, NULL);
			if (rv != 0) {
				csio_err(hw,
				    "SCSI module EQ config failed [%d][%d]:%d\n",
				    i, j, rv);
				goto err;
			}
		}
	}

	hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
	return 0;
err:
	csio_wr_destroy_queues(hw, true);
	return -EINVAL;
}
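
/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates queue memory (one SCSI queue set per CPU per port, plus the
 * forward-interrupt, FW event and management queues), creates the
 * queues in firmware and requests IRQs. Queue sets beyond max_cpus
 * share the queues allocated for earlier CPUs.
 */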
int
csio_config_queues(struct csio_hw *hw)
{
	int i, j, idx, k = 0;
	int rv;
	struct csio_scsi_qset *sqset;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_scsi_qset *orig;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
		return csio_create_queues(hw);

	/* Calculate number of SCSI queue sets: one per CPU per port */
	hw->num_scsi_msix_cpus = num_online_cpus();
	hw->num_sqsets = num_online_cpus() * hw->num_pports;

	if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
		hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
		hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
	}

	/* Initialize max_cpus; may get reduced during MSI-X allocation */
	for (i = 0; i < hw->num_pports; i++)
		hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

	csio_dbg(hw, "nsqsets:%d scpus:%d\n",
		 hw->num_sqsets, hw->num_scsi_msix_cpus);

	csio_intr_enable(hw);

	if (hw->intr_mode != CSIO_IM_MSIX) {
		/* Allocate the forward-interrupt ingress queue */
		hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
						  CSIO_INTR_WRSIZE,
						  CSIO_INGRESS,
						  (void *)hw, 0, 0, NULL);
		if (hw->intr_iq_idx == -1) {
			csio_err(hw,
				 "Forward interrupt queue creation failed\n");
			goto intr_disable;
		}
	}

	/* Allocate the FW event queue */
	hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
					   CSIO_FWEVT_WRSIZE,
					   CSIO_INGRESS, (void *)hw,
					   CSIO_FWEVT_FLBUFS, 0,
					   csio_fwevt_intx_handler);
	if (hw->fwevt_iq_idx == -1) {
		csio_err(hw, "FW evt queue creation failed\n");
		goto intr_disable;
	}

	/* Allocate the management egress queue */
	mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
					CSIO_MGMT_EQ_WRSIZE,
					CSIO_EGRESS, (void *)hw, 0, 0, NULL);
	if (mgmtm->eq_idx == -1) {
		csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
		goto intr_disable;
	}

	/* Use the FW event IQ for mgmt request completions */
	mgmtm->iq_idx = hw->fwevt_iq_idx;

	/* Allocate SCSI queue sets */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			sqset = &hw->sqset[i][j];

			if (j >= info->max_cpus) {
				/* Share the qset of an earlier CPU */
				k = j % info->max_cpus;
				orig = &hw->sqset[i][k];
				sqset->eq_idx = orig->eq_idx;
				sqset->iq_idx = orig->iq_idx;
				continue;
			}

			idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
					      CSIO_EGRESS, (void *)hw, 0, 0,
					      NULL);
			if (idx == -1) {
				csio_err(hw, "EQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}

			sqset->eq_idx = idx;

			idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
					      CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
					      (void *)hw, 0, 0,
					      csio_scsi_intx_handler);
			if (idx == -1) {
				csio_err(hw, "IQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}
			sqset->iq_idx = idx;
		} /* for all CPUs */
	} /* for all ports */

	hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

	rv = csio_create_queues(hw);
	if (rv != 0)
		goto intr_disable;

	/*
	 * Now request IRQs for the vectors. In the event of a failure,
	 * cleanup is handled internally by csio_request_irqs().
	 */
	rv = csio_request_irqs(hw);
	if (rv != 0)
		return -EINVAL;

	return 0;

intr_disable:
	csio_intr_disable(hw, false);

	return -EINVAL;
}
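
/*
 * csio_resource_alloc - Allocate the mailbox and rnode mempools and the
 * DMA pool used for SCSI responses.
 */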
static int
csio_resource_alloc(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv = -ENOMEM;

	wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
		      CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);

	hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						     sizeof(struct csio_mb));
	if (!hw->mb_mempool)
		goto err;

	hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						sizeof(struct csio_rnode));
	if (!hw->rnode_mempool)
		goto err_free_mb_mempool;

	hw->scsi_dma_pool = dma_pool_create("csio_scsi_dma_pool",
					    &hw->pdev->dev, CSIO_SCSI_RSP_LEN,
					    8, 0);
	if (!hw->scsi_dma_pool)
		goto err_free_rn_pool;

	return 0;

err_free_rn_pool:
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
err_free_mb_mempool:
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
err:
	return rv;
}
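
/*
 * csio_resource_free - Free the resources allocated by
 * csio_resource_alloc().
 */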
static void
csio_resource_free(struct csio_hw *hw)
{
	dma_pool_destroy(hw->scsi_dma_pool);
	hw->scsi_dma_pool = NULL;
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
}
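
/*
 * csio_hw_alloc - Allocate and initialize the HW module.
 * @pdev: PCI device.
 *
 * Allocates the HW structure and its memory/DMA resources, maps BAR 0,
 * starts the workers and initializes the HW module. Returns NULL on
 * failure.
 */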
static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
{
	struct csio_hw *hw;

	hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
	if (!hw)
		goto err;

	hw->pdev = pdev;
	/* strscpy(), unlike strncpy(), guarantees NUL termination */
	strscpy(hw->drv_version, CSIO_DRV_VERSION, 32);

	/* Memory pool/DMA pool allocation */
	if (csio_resource_alloc(hw))
		goto err_free_hw;

	/* Get the start address of registers from BAR 0 */
	hw->regstart = ioremap(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
	if (!hw->regstart) {
		csio_err(hw, "Could not map BAR 0, regstart = %p\n",
			 hw->regstart);
		goto err_resource_free;
	}

	csio_hw_init_workers(hw);

	if (csio_hw_init(hw))
		goto err_unmap_bar;

	csio_dfs_create(hw);

	csio_dbg(hw, "hw:%p\n", hw);

	return hw;

err_unmap_bar:
	csio_hw_exit_workers(hw);
	iounmap(hw->regstart);
err_resource_free:
	csio_resource_free(hw);
err_free_hw:
	kfree(hw);
err:
	return NULL;
}
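
/*
 * csio_hw_free - Uninitialize and free the HW module.
 * @hw: The HW module.
 *
 * Disables interrupts, uninitializes the HW module, frees resources and
 * frees the hw structure.
 */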
static void
csio_hw_free(struct csio_hw *hw)
{
	csio_intr_disable(hw, true);
	csio_hw_exit_workers(hw);
	csio_hw_exit(hw);
	iounmap(hw->regstart);
	csio_dfs_destroy(hw);
	csio_resource_free(hw);
	kfree(hw);
}
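
/*
 * csio_shost_init - Create and initialize the lnode module.
 * @hw:		The HW module.
 * @dev:	The device associated with this invocation; the PCI dev
 *		for physical ports, the vport dev for NPIV ports.
 * @probe:	Called from probe context or not?
 * @pln:	Parent lnode if any.
 *
 * Allocates an lnode via scsi_host_alloc(), initializes the shost and
 * the lnode, and registers with the SCSI midlayer. Shared by physical
 * and NPIV node ports. Returns NULL on failure.
 */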
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
		bool probe, struct csio_lnode *pln)
{
	struct Scsi_Host *shost = NULL;
	struct csio_lnode *ln;

	csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
	csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;

	/*
	 * hw->pdev is the physical port's PCI dev structure,
	 * which will be different from the NPIV dev structure.
	 */
	if (dev == &hw->pdev->dev)
		shost = scsi_host_alloc(
				&csio_fcoe_shost_template,
				sizeof(struct csio_lnode));
	else
		shost = scsi_host_alloc(
				&csio_fcoe_shost_vport_template,
				sizeof(struct csio_lnode));

	if (!shost)
		goto err;

	ln = shost_priv(shost);
	memset(ln, 0, sizeof(struct csio_lnode));

	/* Unique device number: upper 16 bits are the host number */
	ln->dev_num = (shost->host_no << 16);

	shost->can_queue = CSIO_MAX_QUEUE;
	shost->this_id = -1;
	shost->unique_id = shost->host_no;
	shost->max_cmd_len = 16;	/* Max CDB length supported */
	shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
			      hw->fres_info.max_ssns);
	shost->max_lun = CSIO_MAX_LUN;
	if (dev == &hw->pdev->dev)
		shost->transportt = csio_fcoe_transport;
	else
		shost->transportt = csio_fcoe_transport_vport;

	/* root lnode */
	if (!hw->rln)
		hw->rln = ln;

	/* Other initialization here: Common, Transport specific */
	if (csio_lnode_init(ln, hw, pln))
		goto err_shost_put;

	if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
		goto err_lnode_exit;

	return ln;

err_lnode_exit:
	csio_lnode_exit(ln);
err_shost_put:
	scsi_host_put(shost);
err:
	return NULL;
}
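
/*
 * csio_shost_exit - De-instantiate the shost.
 * @ln: The lnode module corresponding to the shost.
 */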
void
csio_shost_exit(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	/* Inform transport */
	fc_remove_host(shost);

	/* Inform SCSI ML */
	scsi_remove_host(shost);

	/*
	 * Flush all the events, so that any rnode removal events
	 * already queued are all handled before we remove the lnode.
	 */
	spin_lock_irq(&hw->lock);
	csio_evtq_flush(hw);
	spin_unlock_irq(&hw->lock);

	csio_lnode_exit(ln);
	scsi_host_put(shost);
}

struct csio_lnode *
csio_lnode_alloc(struct csio_hw *hw)
{
	return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
}
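
/*
 * csio_lnodes_block_request - Block IOs on all lnodes.
 * The lnode pointers are snapshotted under the HW lock; the hosts are
 * blocked after the lock has been dropped.
 */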
void
csio_lnodes_block_request(struct csio_hw *hw)
{
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}
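
/* csio_lnodes_unblock_request - Unblock IOs on all lnodes. */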
void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}
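
/* csio_lnodes_block_by_port - Block IOs on the lnodes of the given port. */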
void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;

		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}
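
/* csio_lnodes_unblock_by_port - Unblock IOs on the lnodes of the given port. */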
void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}
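
/*
 * csio_lnodes_exit - Delete all child (NPIV) lnodes; unless @npiv is
 * true, the physical (parent) lnodes are deleted as well.
 */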
void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
		return;
	}

	/* Get all child lnodes (NPIV ports) */
	spin_lock_irq(&hw->lock);
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete NPIV lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		fc_vport_terminate(ln->fc_vport);
	}

	/* Delete only NPIV lnodes */
	if (npiv)
		goto free_lnodes;

	cur_cnt = 0;
	/* Get all physical lnodes */
	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete physical lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
		csio_shost_exit(lnode_list[ii]);
	}

free_lnodes:
	kfree(lnode_list);
}
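
/*
 * csio_lnode_init_post - Set up the FC host attributes and begin
 * scanning the SCSI host once the lnode is initialized.
 */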
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);

	csio_fchost_attr_init(ln);

	scsi_scan_host(shost);
}
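
/*
 * csio_probe_one - Instantiate this function of the probed PCI device.
 * @pdev: PCI device.
 * @id: Device ID.
 *
 * Initializes the PCI function, brings up the HW module (including
 * firmware) and creates one lnode (SCSI host) per physical port.
 */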
static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rv;
	int bars;
	int i;
	struct csio_hw *hw;
	struct csio_lnode *ln;

	/* Probe only T5 and T6 cards */
	if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) &&
	    !csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK)))
		return -ENODEV;

	rv = csio_pci_init(pdev, &bars);
	if (rv)
		goto err;

	hw = csio_hw_alloc(pdev);
	if (!hw) {
		rv = -ENODEV;
		goto err_pci_exit;
	}

	if (!pcie_relaxed_ordering_enabled(pdev))
		hw->flags |= CSIO_HWF_ROOT_NO_RELAXED_ORDERING;

	pci_set_drvdata(pdev, hw);

	rv = csio_hw_start(hw);
	if (rv) {
		if (rv == -EINVAL) {
			dev_err(&pdev->dev,
				"Failed to start FW, continuing in debug mode.\n");
			return 0;
		}
		goto err_lnode_exit;
	}

	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
		FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		FW_HDR_FW_VER_BUILD_G(hw->fwrev));

	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_lnode_exit;

	return 0;

err_lnode_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
err_pci_exit:
	csio_pci_exit(pdev, &bars);
err:
	dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
	return rv;
}
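
/*
 * csio_remove_one - Remove one instance of the driver at this PCI function.
 * @pdev: PCI device.
 *
 * Used during hotplug and driver module unload.
 */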
static void csio_remove_one(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Stops lnode, rnode state machines.
	 * Quiesces IOs.
	 * All sessions with remote ports are unregistered.
	 */
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);

	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	csio_pci_exit(pdev, &bars);
}
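
/*
 * csio_pci_error_detected - PCI error was detected on this device.
 * @pdev: PCI device.
 * @state: PCI channel state after the error.
 */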
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Post the PCI-error-detected event to the HW state machine,
	 * which handles it by quiescing IOs, unregistering rports and
	 * finally taking the device offline.
	 */
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_intr_disable(hw, true);
	pci_disable_device(pdev);
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
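
/*
 * csio_pci_slot_reset - PCI slot has been reset.
 * @pdev: PCI device.
 */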
static pci_ers_result_t
csio_pci_slot_reset(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int ready;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Bring the HW state machine to the ready state,
	 * but don't resume IOs.
	 */
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
	ready = csio_is_hw_ready(hw);
	spin_unlock_irq(&hw->lock);

	if (ready) {
		return PCI_ERS_RESULT_RECOVERED;
	} else {
		dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
}
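
/*
 * csio_pci_resume - Resume normal operations after error recovery.
 * @pdev: PCI device.
 */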
static void
csio_pci_resume(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	struct csio_lnode *ln;
	int rv = 0;
	int i;

	/* Re-create the lnodes (one per port) torn down during recovery */
	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_resume_exit;

	return;

err_resume_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}

static struct pci_error_handlers csio_err_handler = {
	.error_detected	= csio_pci_error_detected,
	.slot_reset	= csio_pci_slot_reset,
	.resume		= csio_pci_resume,
};
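
/*
 * Macros needed to support the PCI Device ID Table; the storage (FCoE)
 * personality of these adapters is exposed on PCI function 6.
 */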
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id csio_pci_tbl[] = {

#define CH_PCI_DEVICE_ID_FUNCTION	0x6

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END	{ 0, } }

#include "t4_pci_id_tbl.h"
static struct pci_driver csio_pci_driver = {
	.name		= KBUILD_MODNAME,
	.driver		= {
		.owner	= THIS_MODULE,
	},
	.id_table	= csio_pci_tbl,
	.probe		= csio_probe_one,
	.remove		= csio_remove_one,
	.err_handler	= &csio_err_handler,
};
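
/*
 * csio_init - Chelsio storage driver initialization.
 *
 * Creates the debugfs root, attaches the FC transport templates and
 * registers the PCI driver.
 */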
static int __init
csio_init(void)
{
	int rv = -ENOMEM;

	pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);

	csio_dfs_init();

	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
	if (!csio_fcoe_transport)
		goto err;

	csio_fcoe_transport_vport =
			fc_attach_transport(&csio_fc_transport_vport_funcs);
	if (!csio_fcoe_transport_vport)
		goto err_vport;

	rv = pci_register_driver(&csio_pci_driver);
	if (rv)
		goto err_pci;

	return 0;

err_pci:
	fc_release_transport(csio_fcoe_transport_vport);
err_vport:
	fc_release_transport(csio_fcoe_transport);
err:
	csio_dfs_exit();
	return rv;
}
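
/*
 * csio_exit - Chelsio storage driver uninitialization.
 *
 * Called on module unload; releases everything set up by csio_init().
 */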
static void __exit
csio_exit(void)
{
	pci_unregister_driver(&csio_pci_driver);
	csio_dfs_exit();
	fc_release_transport(csio_fcoe_transport_vport);
	fc_release_transport(csio_fcoe_transport);
}

module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
MODULE_FIRMWARE(FW_FNAME_T6);
MODULE_SOFTDEP("pre: cxgb4");