0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034 #include <linux/pci.h>
0035 #include <linux/io.h>
0036 #include <linux/delay.h>
0037 #include <linux/vmalloc.h>
0038 #include <linux/aer.h>
0039 #include <linux/module.h>
0040
0041 #include "qib.h"
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
/* Forward declarations for the PCIe tuning helpers defined below. */
static void qib_tune_pcie_caps(struct qib_devdata *);
static void qib_tune_pcie_coalesce(struct qib_devdata *);
0057
0058
0059
0060
0061
0062
0063
0064 int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
0065 {
0066 int ret;
0067
0068 ret = pci_enable_device(pdev);
0069 if (ret) {
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082 qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
0083 -ret);
0084 goto done;
0085 }
0086
0087 ret = pci_request_regions(pdev, QIB_DRV_NAME);
0088 if (ret) {
0089 qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
0090 goto bail;
0091 }
0092
0093 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
0094 if (ret) {
0095
0096
0097
0098
0099
0100 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
0101 if (ret) {
0102 qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
0103 goto bail;
0104 }
0105 }
0106
0107 pci_set_master(pdev);
0108 ret = pci_enable_pcie_error_reporting(pdev);
0109 if (ret) {
0110 qib_early_err(&pdev->dev,
0111 "Unable to enable pcie error reporting: %d\n",
0112 ret);
0113 ret = 0;
0114 }
0115 goto done;
0116
0117 bail:
0118 pci_disable_device(pdev);
0119 pci_release_regions(pdev);
0120 done:
0121 return ret;
0122 }
0123
0124
0125
0126
0127
0128
0129 int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
0130 const struct pci_device_id *ent)
0131 {
0132 unsigned long len;
0133 resource_size_t addr;
0134
0135 dd->pcidev = pdev;
0136 pci_set_drvdata(pdev, dd);
0137
0138 addr = pci_resource_start(pdev, 0);
0139 len = pci_resource_len(pdev, 0);
0140
0141 dd->kregbase = ioremap(addr, len);
0142 if (!dd->kregbase)
0143 return -ENOMEM;
0144
0145 dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
0146 dd->physaddr = addr;
0147
0148
0149
0150
0151
0152 dd->pcibar0 = addr;
0153 dd->pcibar1 = addr >> 32;
0154 dd->deviceid = ent->device;
0155 dd->vendorid = ent->vendor;
0156
0157 return 0;
0158 }
0159
0160
0161
0162
0163
0164
0165 void qib_pcie_ddcleanup(struct qib_devdata *dd)
0166 {
0167 u64 __iomem *base = (void __iomem *) dd->kregbase;
0168
0169 dd->kregbase = NULL;
0170 iounmap(base);
0171 if (dd->piobase)
0172 iounmap(dd->piobase);
0173 if (dd->userbase)
0174 iounmap(dd->userbase);
0175 if (dd->piovl15base)
0176 iounmap(dd->piovl15base);
0177
0178 pci_disable_device(dd->pcidev);
0179 pci_release_regions(dd->pcidev);
0180
0181 pci_set_drvdata(dd->pcidev, NULL);
0182 }
0183
0184
0185
0186
0187
0188
0189 static void qib_cache_msi_info(struct qib_devdata *dd, int pos)
0190 {
0191 struct pci_dev *pdev = dd->pcidev;
0192 u16 control;
0193
0194 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, &dd->msi_lo);
0195 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, &dd->msi_hi);
0196 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
0197
0198
0199 pci_read_config_word(pdev,
0200 pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
0201 &dd->msi_data);
0202 }
0203
/*
 * qib_pcie_params - allocate IRQ vectors and record PCIe link parameters
 * @dd: per-device data
 * @minw: minimum acceptable link width; warn if the trained width is less
 * @nent: in: maximum number of vectors wanted; out: number of MSI-X
 *        vectors allocated, or 0 if MSI/INTx is in use instead
 *
 * Returns 0 on success, or the negative errno from vector allocation.
 * dd->lbus_speed/lbus_width/lbus_info are filled in even on error.
 */
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
{
	u16 linkstat, speed;
	int nvec;
	int maxvec;
	unsigned int flags = PCI_IRQ_MSIX | PCI_IRQ_MSI;

	if (!pci_is_pcie(dd->pcidev)) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* Set up something plausible so lbus_info is still valid. */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5 GT/s */
		nvec = -1;
		goto bail;
	}

	/* Only offer legacy INTx as a fallback if the chip supports it. */
	if (dd->flags & QIB_HAS_INTX)
		flags |= PCI_IRQ_LEGACY;
	maxvec = (nent && *nent) ? *nent : 1;
	nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
	if (nvec < 0)
		goto bail;

	/*
	 * Report the allocated vector count to the caller.  When MSI-X
	 * was not obtained, report 0 so the caller's non-MSI-X fallback
	 * path is taken.
	 */
	if (nent)
		*nent = !dd->pcidev->msix_enabled ? 0 : nvec;

	/* Cache MSI registers so they survive a chip reset. */
	if (dd->pcidev->msi_enabled)
		qib_cache_msi_info(dd, dd->pcidev->msi_cap);

	pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
	/*
	 * Link status: speed code in bits 0-3, negotiated width in
	 * bits 4-8; decoded by hand here.
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5 GT/s */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5 GT/s */
		break;
	default: /* unknown code -- assume Gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/* Warn when the link trained narrower than the caller expects. */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);

	qib_tune_pcie_coalesce(dd);

bail:
	/* Fill in the bus-info string, even on errors. */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return nvec < 0 ? nvec : 0;
}
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288 void qib_free_irq(struct qib_devdata *dd)
0289 {
0290 pci_free_irq(dd->pcidev, 0, dd);
0291 pci_free_irq_vectors(dd->pcidev);
0292 }
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302 int qib_reinit_intr(struct qib_devdata *dd)
0303 {
0304 int pos;
0305 u16 control;
0306 int ret = 0;
0307
0308
0309 if (!dd->msi_lo)
0310 goto bail;
0311
0312 pos = dd->pcidev->msi_cap;
0313 if (!pos) {
0314 qib_dev_err(dd,
0315 "Can't find MSI capability, can't restore MSI settings\n");
0316 ret = 0;
0317
0318 goto bail;
0319 }
0320 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
0321 dd->msi_lo);
0322 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
0323 dd->msi_hi);
0324 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
0325 if (!(control & PCI_MSI_FLAGS_ENABLE)) {
0326 control |= PCI_MSI_FLAGS_ENABLE;
0327 pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
0328 control);
0329 }
0330
0331 pci_write_config_word(dd->pcidev, pos +
0332 ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
0333 dd->msi_data);
0334 ret = 1;
0335 bail:
0336 qib_free_irq(dd);
0337
0338 if (!ret && (dd->flags & QIB_HAS_INTX))
0339 ret = 1;
0340
0341
0342 pci_set_master(dd->pcidev);
0343
0344 return ret;
0345 }
0346
0347
0348
0349
0350
0351 void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
0352 {
0353 pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
0354 pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
0355 pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
0356 }
0357
0358 void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
0359 {
0360 int r;
0361
0362 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
0363 dd->pcibar0);
0364 if (r)
0365 qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
0366 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
0367 dd->pcibar1);
0368 if (r)
0369 qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
0370
0371 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
0372 pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
0373 pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
0374 r = pci_enable_device(dd->pcidev);
0375 if (r)
0376 qib_dev_err(dd,
0377 "pci_enable_device failed after reset: %d\n", r);
0378 }
0379
0380
/* Module parameter gating qib_tune_pcie_coalesce(); off by default. */
static int qib_pcie_coalesce;
module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
0384
0385
0386
0387
0388
0389
0390
0391 static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
0392 {
0393 struct pci_dev *parent;
0394 u16 devid;
0395 u32 mask, bits, val;
0396
0397 if (!qib_pcie_coalesce)
0398 return;
0399
0400
0401 parent = dd->pcidev->bus->self;
0402 if (parent->bus->parent) {
0403 qib_devinfo(dd->pcidev, "Parent not root\n");
0404 return;
0405 }
0406 if (!pci_is_pcie(parent))
0407 return;
0408 if (parent->vendor != 0x8086)
0409 return;
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420 devid = parent->device;
0421 if (devid >= 0x25e2 && devid <= 0x25fa) {
0422
0423 if (parent->revision <= 0xb2)
0424 bits = 1U << 10;
0425 else
0426 bits = 7U << 10;
0427 mask = (3U << 24) | (7U << 10);
0428 } else if (devid >= 0x65e2 && devid <= 0x65fa) {
0429
0430 bits = 1U << 10;
0431 mask = (3U << 24) | (7U << 10);
0432 } else if (devid >= 0x4021 && devid <= 0x402e) {
0433
0434 bits = 7U << 10;
0435 mask = 7U << 10;
0436 } else if (devid >= 0x3604 && devid <= 0x360a) {
0437
0438 bits = 7U << 10;
0439 mask = (3U << 24) | (7U << 10);
0440 } else {
0441
0442 return;
0443 }
0444 pci_read_config_dword(parent, 0x48, &val);
0445 val &= ~mask;
0446 val |= bits;
0447 pci_write_config_dword(parent, 0x48, val);
0448 }
0449
0450
0451
0452
0453
/* Module parameter: caps the MPS/MRRS tuning in qib_tune_pcie_caps(). */
static int qib_pcie_caps;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
0457
0458 static void qib_tune_pcie_caps(struct qib_devdata *dd)
0459 {
0460 struct pci_dev *parent;
0461 u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
0462 u16 rc_mrrs, ep_mrrs, max_mrrs;
0463
0464
0465 parent = dd->pcidev->bus->self;
0466 if (!pci_is_root_bus(parent->bus)) {
0467 qib_devinfo(dd->pcidev, "Parent not root\n");
0468 return;
0469 }
0470
0471 if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
0472 return;
0473
0474 rc_mpss = parent->pcie_mpss;
0475 rc_mps = ffs(pcie_get_mps(parent)) - 8;
0476
0477 ep_mpss = dd->pcidev->pcie_mpss;
0478 ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
0479
0480
0481 if (rc_mpss > ep_mpss)
0482 rc_mpss = ep_mpss;
0483
0484
0485 if (rc_mpss > (qib_pcie_caps & 7))
0486 rc_mpss = qib_pcie_caps & 7;
0487
0488 if (rc_mpss > rc_mps) {
0489 rc_mps = rc_mpss;
0490 pcie_set_mps(parent, 128 << rc_mps);
0491 }
0492
0493 if (rc_mpss > ep_mps) {
0494 ep_mps = rc_mpss;
0495 pcie_set_mps(dd->pcidev, 128 << ep_mps);
0496 }
0497
0498
0499
0500
0501
0502
0503 max_mrrs = 5;
0504 if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
0505 max_mrrs = (qib_pcie_caps >> 4) & 7;
0506
0507 max_mrrs = 128 << max_mrrs;
0508 rc_mrrs = pcie_get_readrq(parent);
0509 ep_mrrs = pcie_get_readrq(dd->pcidev);
0510
0511 if (max_mrrs > rc_mrrs) {
0512 rc_mrrs = max_mrrs;
0513 pcie_set_readrq(parent, rc_mrrs);
0514 }
0515 if (max_mrrs > ep_mrrs) {
0516 ep_mrrs = max_mrrs;
0517 pcie_set_readrq(dd->pcidev, ep_mrrs);
0518 }
0519 }
0520
0521
0522
0523
0524
0525
0526 static pci_ers_result_t
0527 qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
0528 {
0529 struct qib_devdata *dd = pci_get_drvdata(pdev);
0530 pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
0531
0532 switch (state) {
0533 case pci_channel_io_normal:
0534 qib_devinfo(pdev, "State Normal, ignoring\n");
0535 break;
0536
0537 case pci_channel_io_frozen:
0538 qib_devinfo(pdev, "State Frozen, requesting reset\n");
0539 pci_disable_device(pdev);
0540 ret = PCI_ERS_RESULT_NEED_RESET;
0541 break;
0542
0543 case pci_channel_io_perm_failure:
0544 qib_devinfo(pdev, "State Permanent Failure, disabling\n");
0545 if (dd) {
0546
0547 dd->flags &= ~QIB_PRESENT;
0548 qib_disable_after_error(dd);
0549 }
0550
0551 ret = PCI_ERS_RESULT_DISCONNECT;
0552 break;
0553
0554 default:
0555 qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
0556 state);
0557 break;
0558 }
0559 return ret;
0560 }
0561
0562 static pci_ers_result_t
0563 qib_pci_mmio_enabled(struct pci_dev *pdev)
0564 {
0565 u64 words = 0U;
0566 struct qib_devdata *dd = pci_get_drvdata(pdev);
0567 pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
0568
0569 if (dd && dd->pport) {
0570 words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
0571 if (words == ~0ULL)
0572 ret = PCI_ERS_RESULT_NEED_RESET;
0573 }
0574 qib_devinfo(pdev,
0575 "QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n",
0576 words, ret);
0577 return ret;
0578 }
0579
0580 static pci_ers_result_t
0581 qib_pci_slot_reset(struct pci_dev *pdev)
0582 {
0583 qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
0584 return PCI_ERS_RESULT_CAN_RECOVER;
0585 }
0586
/*
 * Resume callback after error recovery: reinitialize the device.
 * NOTE(review): the return of qib_init() is ignored here — presumably
 * nothing more can be done on failure; confirm against qib_init().
 */
static void
qib_pci_resume(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);

	qib_devinfo(pdev, "QIB resume function called\n");

	/* A full reinit is the best recovery available at this point. */
	qib_init(dd, 1);
}
0600
/* PCI error-recovery callbacks registered with the PCI core. */
const struct pci_error_handlers qib_pci_err_handler = {
	.error_detected = qib_pci_error_detected,
	.mmio_enabled = qib_pci_mmio_enabled,
	.slot_reset = qib_pci_slot_reset,
	.resume = qib_pci_resume,
};