#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"
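
/*
 * csio_nondata_isr - MSI-X ISR for the non-data vector: slow-path (error)
 * interrupts and mailbox completions. Schedules the firmware event worker
 * when further event processing is needed.
 */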
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        int rv;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        spin_lock_irqsave(&hw->lock, flags);
        csio_hw_slow_intr_handler(hw);
        rv = csio_mb_isr_handler(hw);

        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
        return IRQ_HANDLED;
}
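
/*
 * csio_fwevt_handler - Common firmware-event handler shared by the MSI-X
 * and INTx paths. Reschedules the event worker if more events remain.
 */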
static void
csio_fwevt_handler(struct csio_hw *hw)
{
        int rv;
        unsigned long flags;

        rv = csio_fwevtq_handler(hw);

        spin_lock_irqsave(&hw->lock, flags);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
}
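
/*
 * csio_fwevt_isr - MSI-X ISR for the firmware event queue vector.
 */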
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_fwevt_handler(hw);

        return IRQ_HANDLED;
}
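
/*
 * csio_fwevt_intx_handler - INTx-mode wrapper around csio_fwevt_handler(),
 * invoked as an ingress-queue message handler.
 */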
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                        struct csio_fl_dma_buf *flb, void *priv)
{
        csio_fwevt_handler(hw);
}
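
/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion and move the
 * owning ioreq onto the caller's completion (cbfn) queue.
 */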
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
                       struct csio_fl_dma_buf *flb, void *cbfn_q)
{
        struct csio_ioreq *ioreq;
        uint8_t *scsiwr;
        uint8_t subop;
        void *cmnd;
        unsigned long flags;

        ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
        if (likely(ioreq)) {
                if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
                        subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
                                        ((struct fw_scsi_abrt_cls_wr *)
                                            scsiwr)->sub_opcode_to_chk_all_io);

                        csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
                                 subop ? "Close" : "Abort",
                                 ioreq, ioreq->wr_status);

                        spin_lock_irqsave(&hw->lock, flags);
                        if (subop)
                                csio_scsi_closed(ioreq,
                                                 (struct list_head *)cbfn_q);
                        else
                                csio_scsi_aborted(ioreq,
                                                  (struct list_head *)cbfn_q);

                        /*
                         * If the command has already been returned to the
                         * midlayer (for example by the error handler racing
                         * with this abort/close completion), the ioreq no
                         * longer carries a SCSI command. Drop it from the
                         * callback queue and free it directly instead of
                         * completing it a second time.
                         */
                        cmnd = csio_scsi_cmnd(ioreq);
                        if (unlikely(cmnd == NULL))
                                list_del_init(&ioreq->sm.sm_list);

                        spin_unlock_irqrestore(&hw->lock, flags);

                        if (unlikely(cmnd == NULL))
                                csio_put_scsi_ioreq_lock(hw,
                                                csio_hw_to_scsim(hw), ioreq);
                } else {
                        spin_lock_irqsave(&hw->lock, flags);
                        csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
                        spin_unlock_irqrestore(&hw->lock, flags);
                }
        }
}
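
/*
 * csio_scsi_isr_handler - Process completions on a SCSI ingress queue,
 * invoke the completion callback for each ioreq, and return the ioreqs
 * (and any DDP buffers) to their free lists.
 */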
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
        struct csio_hw *hw = (struct csio_hw *)iq->owner;
        LIST_HEAD(cbfn_q);
        struct list_head *tmp;
        struct csio_scsim *scm;
        struct csio_ioreq *ioreq;
        int isr_completions = 0;

        scm = csio_hw_to_scsim(hw);

        if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
                                        &cbfn_q) != 0))
                return IRQ_NONE;

        /* Call back the completion routines */
        list_for_each(tmp, &cbfn_q) {
                ioreq = (struct csio_ioreq *)tmp;
                isr_completions++;
                ioreq->io_cbfn(hw, ioreq);
                /* Release the DDP buffers used by this I/O, if any */
                if (unlikely(ioreq->dcopy))
                        csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
                                                    ioreq->nsge);
        }

        if (isr_completions) {
                /* Return the completed ioreqs to the free list */
                csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
                                              isr_completions);
        }

        return IRQ_HANDLED;
}
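
/*
 * csio_scsi_isr - MSI-X ISR for a SCSI ingress queue vector.
 */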
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
        struct csio_q *iq = (struct csio_q *) dev_id;
        struct csio_hw *hw;

        if (unlikely(!iq))
                return IRQ_NONE;

        hw = (struct csio_hw *)iq->owner;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_scsi_isr_handler(iq);

        return IRQ_HANDLED;
}
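
/*
 * csio_scsi_intx_handler - INTx-mode ingress-queue handler for SCSI
 * completions; the SCSI queue is passed in via @priv.
 */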
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                       struct csio_fl_dma_buf *flb, void *priv)
{
        struct csio_q *iq = priv;

        csio_scsi_isr_handler(iq);
}
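
/*
 * csio_fcoe_isr - INTx/MSI interrupt service routine: handles slow-path
 * interrupts, the forward-interrupt ingress queue and mailbox completions
 * from a single vector.
 */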
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        struct csio_q *intx_q = NULL;
        int rv;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        /* Disable the interrupt for this PCI function. */
        if (hw->intr_mode == CSIO_IM_INTX)
                csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

        /*
         * The register reads in the slow-path handler below flush the
         * posted write above.
         */
        if (csio_hw_slow_intr_handler(hw))
                ret = IRQ_HANDLED;

        /* Get the INTx forward interrupt queue. */
        intx_q = csio_get_q(hw, hw->intr_iq_idx);

        CSIO_DB_ASSERT(intx_q);

        /* No per-message handler is needed for intx_q, hence pass NULL */
        if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
                ret = IRQ_HANDLED;

        spin_lock_irqsave(&hw->lock, flags);
        rv = csio_mb_isr_handler(hw);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

        return ret;
}
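
/*
 * csio_add_msix_desc - Fill in descriptive names for each MSI-X vector
 * (non-data, firmware event and per-CPU SCSI queues).
 */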
static void
csio_add_msix_desc(struct csio_hw *hw)
{
        int i;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        int k = CSIO_EXTRA_VECS;
        int len = sizeof(entryp->desc) - 1;
        int cnt = hw->num_sqsets + k;

        /* Non-data vector */
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

        /* Firmware event vector */
        entryp++;
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
        entryp++;

        /* Name the SCSI vectors */
        for (i = k; i < cnt; i++, entryp++) {
                memset(entryp->desc, 0, len + 1);
                snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
                         CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
                         CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
        }
}
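
/*
 * csio_request_irqs - Request the IRQ lines for the chosen interrupt mode:
 * a single shared line for INTx/MSI, or one vector each for non-data,
 * firmware events and every SCSI queue set when using MSI-X.
 */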
int
csio_request_irqs(struct csio_hw *hw)
{
        int rv, i, j, k = 0;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        struct csio_scsi_cpu_info *info;
        struct pci_dev *pdev = hw->pdev;

        if (hw->intr_mode != CSIO_IM_MSIX) {
                rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
                                 hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
                                 KBUILD_MODNAME, hw);
                if (rv) {
                        csio_err(hw, "Failed to allocate interrupt line.\n");
                        goto out_free_irqs;
                }

                goto out;
        }

        /* Add the MSI-X vector descriptions */
        csio_add_msix_desc(hw);

        rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         pci_irq_vector(pdev, k), rv);
                goto out_free_irqs;
        }

        entryp[k++].dev_id = hw;

        rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         pci_irq_vector(pdev, k), rv);
                goto out_free_irqs;
        }

        entryp[k++].dev_id = (void *)hw;

        /* Allocate IRQs for the SCSI queue sets */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];
                for (j = 0; j < info->max_cpus; j++, k++) {
                        struct csio_scsi_qset *sqset = &hw->sqset[i][j];
                        struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

                        rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0,
                                         entryp[k].desc, q);
                        if (rv) {
                                csio_err(hw,
                                       "IRQ request failed for vec %d err:%d\n",
                                       pci_irq_vector(pdev, k), rv);
                                goto out_free_irqs;
                        }

                        entryp[k].dev_id = q;
                }
        }

out:
        hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
        return 0;

out_free_irqs:
        for (i = 0; i < k; i++)
                free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
        pci_free_irq_vectors(hw->pdev);
        return -EINVAL;
}
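
/* Reduce the per-port SCSI queue sets until the total fits within cnt */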
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
        int i;
        struct csio_scsi_cpu_info *info;

        while (cnt < hw->num_sqsets) {
                for (i = 0; i < hw->num_pports; i++) {
                        info = &hw->scsi_cpu_info[i];
                        if (info->max_cpus > 1) {
                                info->max_cpus--;
                                hw->num_sqsets--;
                                if (hw->num_sqsets <= cnt)
                                        break;
                        }
                }
        }

        csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}
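
/*
 * csio_calc_sets - irq_affinity calc_sets callback that splits the SCSI
 * vectors into one affinity set per port.
 */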
static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
        struct csio_hw *hw = affd->priv;
        u8 i;

        if (!nvecs)
                return;

        if (nvecs < hw->num_pports) {
                affd->nr_sets = 1;
                affd->set_size[0] = nvecs;
                return;
        }

        affd->nr_sets = hw->num_pports;
        for (i = 0; i < hw->num_pports; i++)
                affd->set_size[i] = nvecs / hw->num_pports;
}
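
/*
 * csio_enable_msix - Allocate MSI-X vectors with affinity spread across
 * ports, trimming the SCSI queue sets if fewer vectors were granted, and
 * assign an interrupt index to each queue set.
 */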
static int
csio_enable_msix(struct csio_hw *hw)
{
        int i, j, k, n, min, cnt;
        int extra = CSIO_EXTRA_VECS;
        struct csio_scsi_cpu_info *info;
        struct irq_affinity desc = {
                .pre_vectors = CSIO_EXTRA_VECS,
                .calc_sets = csio_calc_sets,
                .priv = hw,
        };

        if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
                return -ENOSPC;

        min = hw->num_pports + extra;
        cnt = hw->num_sqsets + extra;

        /* Cap the vector count by the number of ingress queues in the FW */
        if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
                cnt = min_t(uint8_t, hw->cfg_niq, cnt);

        csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

        cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
                        PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
        if (cnt < 0)
                return cnt;

        if (cnt < (hw->num_sqsets + extra)) {
                csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
                csio_reduce_sqsets(hw, cnt - extra);
        }

        /* Distribute vectors */
        k = 0;
        csio_set_nondata_intr_idx(hw, k);
        csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
        csio_set_fwevt_intr_idx(hw, k++);

        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
                        n = (j % info->max_cpus) + k;
                        hw->sqset[i][j].intr_idx = n;
                }

                k += info->max_cpus;
        }

        return 0;
}
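
/*
 * csio_intr_enable - Select the interrupt mode, trying MSI-X first, then
 * MSI, then falling back to INTx, based on the csio_msi setting.
 */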
void
csio_intr_enable(struct csio_hw *hw)
{
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

        /* Try MSI-X, then MSI, else fall back to INTx mode */
        if ((csio_msi == 2) && !csio_enable_msix(hw))
                hw->intr_mode = CSIO_IM_MSIX;
        else {
                /* Cap the queue sets by the number of IQs in the FW config */
                if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
                    !csio_is_hw_master(hw)) {
                        int extra = CSIO_EXTRA_MSI_IQS;

                        if (hw->cfg_niq < (hw->num_sqsets + extra)) {
                                csio_dbg(hw, "Reducing sqsets to %d\n",
                                         hw->cfg_niq - extra);
                                csio_reduce_sqsets(hw, hw->cfg_niq - extra);
                        }
                }

                if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
                        hw->intr_mode = CSIO_IM_MSI;
                else
                        hw->intr_mode = CSIO_IM_INTX;
        }

        csio_dbg(hw, "Using %s interrupt mode.\n",
                 (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
                 ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}
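
/*
 * csio_intr_disable - Disable host interrupts and optionally free the IRQ
 * lines and vectors set up by csio_intr_enable()/csio_request_irqs().
 */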
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
        csio_hw_intr_disable(hw);

        if (free) {
                int i;

                switch (hw->intr_mode) {
                case CSIO_IM_MSIX:
                        for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
                                free_irq(pci_irq_vector(hw->pdev, i),
                                         hw->msix_entries[i].dev_id);
                        }
                        break;
                case CSIO_IM_MSI:
                case CSIO_IM_INTX:
                        free_irq(pci_irq_vector(hw->pdev, 0), hw);
                        break;
                default:
                        break;
                }
        }

        pci_free_irq_vectors(hw->pdev);
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}