0001
0002
0003
0004
0005
0006 #include <linux/pci.h>
0007 #include <linux/io.h>
0008 #include <linux/delay.h>
0009 #include <linux/vmalloc.h>
0010 #include <linux/aer.h>
0011 #include <linux/module.h>
0012
0013 #include "hfi.h"
0014 #include "chip_registers.h"
0015 #include "aspm.h"
0016
0017
0018
0019
0020
0021
0022
0023
/*
 * hfi1_pcie_init - common PCIe device enable and setup
 * @dd: device data (dd->pcidev must already be set)
 *
 * Enables the PCI device, claims its regions, configures the DMA mask
 * (64-bit preferred, 32-bit fallback), enables bus mastering, and turns
 * on PCIe AER reporting (best effort).
 *
 * Return: 0 on success or a negative errno.  On failure after the
 * device has been enabled, hfi1_pcie_cleanup() undoes the partial setup.
 */
int hfi1_pcie_init(struct hfi1_devdata *dd)
{
	int ret;
	struct pci_dev *pdev = dd->pcidev;

	ret = pci_enable_device(pdev);
	if (ret) {
		/*
		 * Nothing has been claimed yet, so there is nothing to
		 * unwind; just report the failure and return.
		 */
		dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
		return ret;
	}

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret) {
		dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
		goto bail;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		/*
		 * If the 64-bit setup fails, fall back to a 32-bit DMA
		 * mask before giving up entirely.
		 */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
			goto bail;
		}
	}

	pci_set_master(pdev);
	/* AER enable is best effort; the result is intentionally ignored */
	(void)pci_enable_pcie_error_reporting(pdev);
	return 0;

bail:
	hfi1_pcie_cleanup(pdev);
	return ret;
}
0075
0076
0077
0078
/*
 * hfi1_pcie_cleanup - undo what was done in hfi1_pcie_init()
 * @pdev: the PCI device to release
 */
void hfi1_pcie_cleanup(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
	/*
	 * The regions are released after the disable, matching the
	 * reverse of the acquisition order in hfi1_pcie_init().
	 */
	pci_release_regions(pdev);
}
0088
0089
0090
0091
0092
0093
/*
 * hfi1_pcie_ddinit - chip-specific BAR 0 mapping setup
 * @dd: device data
 * @pdev: PCI device backing @dd
 *
 * Maps BAR 0 in pieces: kregbase1 and kregbase2 uncached (UC), the PIO
 * send buffers and the receive array write-combining (WC).  On success
 * sets HFI1_PRESENT in dd->flags.
 *
 * Return: 0 on success; -EINVAL if the BAR length does not match the
 * chip layout; -ENOMEM if any mapping or the initial CSR read fails
 * (partial mappings are torn down via hfi1_pcie_ddcleanup()).
 */
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
{
	unsigned long len;
	resource_size_t addr;
	int ret = 0;
	u32 rcv_array_count;

	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	/*
	 * The TXE PIO buffers sit at the end of the BAR; any other BAR
	 * size means this is not the expected chip.
	 */
	if (len != TXE_PIO_SEND + TXE_PIO_SIZE) {
		dd_dev_err(dd, "chip PIO range does not match\n");
		return -EINVAL;
	}

	/* UC map the registers up to the start of the receive array */
	dd->kregbase1 = ioremap(addr, RCV_ARRAY);
	if (!dd->kregbase1) {
		dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
		return -ENOMEM;
	}
	dd_dev_info(dd, "UC base1: %p for %x\n", dd->kregbase1, RCV_ARRAY);

	/* verify the CSRs are readable before trusting later reads */
	dd->revision = readq(dd->kregbase1 + CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "Cannot read chip CSRs\n");
		goto nomem;
	}

	/* the second UC region begins just past the receive array */
	rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
	dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
	dd->base2_start = RCV_ARRAY + rcv_array_count * 8;

	dd->kregbase2 = ioremap(
		addr + dd->base2_start,
		TXE_PIO_SEND - dd->base2_start);
	if (!dd->kregbase2) {
		dd_dev_err(dd, "UC mapping of kregbase2 failed\n");
		goto nomem;
	}
	dd_dev_info(dd, "UC base2: %p for %x\n", dd->kregbase2,
		    TXE_PIO_SEND - dd->base2_start);

	/* WC map the PIO send buffers */
	dd->piobase = ioremap_wc(addr + TXE_PIO_SEND, TXE_PIO_SIZE);
	if (!dd->piobase) {
		dd_dev_err(dd, "WC mapping of send buffers failed\n");
		goto nomem;
	}
	dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);

	dd->physaddr = addr;

	/*
	 * The receive array gets its own WC mapping (8 bytes per entry),
	 * overlapping the address range skipped by the two UC mappings.
	 */
	dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
				     rcv_array_count * 8);
	if (!dd->rcvarray_wc) {
		dd_dev_err(dd, "WC mapping of receive array failed\n");
		goto nomem;
	}
	dd_dev_info(dd, "WC RcvArray: %p for %x\n",
		    dd->rcvarray_wc, rcv_array_count * 8);

	dd->flags |= HFI1_PRESENT;	/* chip.c CSR routines now work */
	return 0;
nomem:
	ret = -ENOMEM;
	hfi1_pcie_ddcleanup(dd);
	return ret;
}
0172
0173
0174
0175
0176
0177
0178 void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
0179 {
0180 dd->flags &= ~HFI1_PRESENT;
0181 if (dd->kregbase1)
0182 iounmap(dd->kregbase1);
0183 dd->kregbase1 = NULL;
0184 if (dd->kregbase2)
0185 iounmap(dd->kregbase2);
0186 dd->kregbase2 = NULL;
0187 if (dd->rcvarray_wc)
0188 iounmap(dd->rcvarray_wc);
0189 dd->rcvarray_wc = NULL;
0190 if (dd->piobase)
0191 iounmap(dd->piobase);
0192 dd->piobase = NULL;
0193 }
0194
0195
0196 static u32 extract_speed(u16 linkstat)
0197 {
0198 u32 speed;
0199
0200 switch (linkstat & PCI_EXP_LNKSTA_CLS) {
0201 default:
0202 case PCI_EXP_LNKSTA_CLS_2_5GB:
0203 speed = 2500;
0204 break;
0205 case PCI_EXP_LNKSTA_CLS_5_0GB:
0206 speed = 5000;
0207 break;
0208 case PCI_EXP_LNKSTA_CLS_8_0GB:
0209 speed = 8000;
0210 break;
0211 }
0212 return speed;
0213 }
0214
0215
0216 static u32 extract_width(u16 linkstat)
0217 {
0218 return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
0219 }
0220
0221
/*
 * update_lbus_info - refresh the cached local-bus speed/width
 * @dd: device data
 *
 * Reads PCI_EXP_LNKSTA and updates dd->lbus_speed, dd->lbus_width and
 * the human-readable dd->lbus_info string.  On a config-read failure
 * the cached values are left unchanged.
 */
static void update_lbus_info(struct hfi1_devdata *dd)
{
	u16 linkstat;
	int ret;

	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
	if (ret) {
		dd_dev_err(dd, "Unable to read from PCI config\n");
		return;
	}

	dd->lbus_width = extract_width(linkstat);
	dd->lbus_speed = extract_speed(linkstat);
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
}
0238
0239
0240
0241
0242
/*
 * pcie_speeds - determine link capabilities and current state
 * @dd: device data
 *
 * Sets dd->link_gen3_capable based on the device's Link Capabilities
 * and the parent bridge's maximum bus speed, then caches the current
 * speed/width via update_lbus_info().
 *
 * Return: 0 on success, -EINVAL if the device is not PCIe, or a
 * pcibios-translated errno on a config read failure.
 */
int pcie_speeds(struct hfi1_devdata *dd)
{
	u32 linkcap;
	struct pci_dev *parent = dd->pcidev->bus->self;
	int ret;

	if (!pci_is_pcie(dd->pcidev)) {
		dd_dev_err(dd, "Can't find PCI Express capability!\n");
		return -EINVAL;
	}

	/* assume Gen3 capable until proven otherwise below */
	dd->link_gen3_capable = 1;

	ret = pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
	if (ret) {
		dd_dev_err(dd, "Unable to read from PCI config\n");
		return pcibios_err_to_errno(ret);
	}

	if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
		dd_dev_info(dd,
			    "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
			    linkcap & PCI_EXP_LNKCAP_SLS);
		dd->link_gen3_capable = 0;
	}

	/*
	 * The parent bridge must also support Gen3; a Gen1/Gen2-only
	 * upstream bus caps what the link can negotiate.
	 */
	if (parent &&
	    (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
	     dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
		dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
		dd->link_gen3_capable = 0;
	}

	/* obtain the link width and current speed */
	update_lbus_info(dd);

	dd_dev_info(dd, "%s\n", dd->lbus_info);

	return 0;
}
0287
0288
0289
0290
0291
0292
/*
 * restore_pci_variables - write back the config-space registers that
 * were captured by save_pci_variables(), after a reset wiped them.
 * @dd: device data
 *
 * Restores PCI_COMMAND, BAR0/BAR1, ROM address, DEVCTL/LNKCTL/DEVCTL2,
 * the MSI-X control dword, and (if the TPH extended capability is
 * present) the TPH2 register.
 *
 * Return: 0 on success, or a pcibios-translated errno on the first
 * failing config write.
 */
int restore_pci_variables(struct hfi1_devdata *dd)
{
	int ret;

	/* command register first so the device responds to what follows */
	ret = pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
	if (ret)
		goto error;

	ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
				     dd->pcibar0);
	if (ret)
		goto error;

	ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
				     dd->pcibar1);
	if (ret)
		goto error;

	ret = pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
	if (ret)
		goto error;

	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL,
					 dd->pcie_devctl);
	if (ret)
		goto error;

	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL,
					 dd->pcie_lnkctl);
	if (ret)
		goto error;

	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
					 dd->pcie_devctl2);
	if (ret)
		goto error;

	ret = pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
	if (ret)
		goto error;

	/* TPH2 only exists when the TPH extended capability is present */
	if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
		ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
					     dd->pci_tph2);
		if (ret)
			goto error;
	}
	return 0;

error:
	dd_dev_err(dd, "Unable to write to PCI config\n");
	return pcibios_err_to_errno(ret);
}
0346
0347
0348
0349
0350
0351
/*
 * save_pci_variables - capture config-space registers that a chip
 * reset will destroy, for later replay by restore_pci_variables().
 * @dd: device data
 *
 * Return: 0 on success, or a pcibios-translated errno on the first
 * failing config read.
 */
int save_pci_variables(struct hfi1_devdata *dd)
{
	int ret;

	ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
				    &dd->pcibar0);
	if (ret)
		goto error;

	ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
				    &dd->pcibar1);
	if (ret)
		goto error;

	ret = pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
	if (ret)
		goto error;

	ret = pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
	if (ret)
		goto error;

	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL,
					&dd->pcie_devctl);
	if (ret)
		goto error;

	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL,
					&dd->pcie_lnkctl);
	if (ret)
		goto error;

	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
					&dd->pcie_devctl2);
	if (ret)
		goto error;

	ret = pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
	if (ret)
		goto error;

	/* only save TPH2 when the TPH extended capability exists */
	if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
		ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
					    &dd->pci_tph2);
		if (ret)
			goto error;
	}
	return 0;

error:
	dd_dev_err(dd, "Unable to read from PCI config\n");
	return pcibios_err_to_errno(ret);
}
0405
0406
0407
0408
0409
/*
 * Module parameter limiting the PCIe tuning done in tune_pcie_caps():
 * bits 0..3 cap the Max Payload size code, bits 4..7 the Max Read
 * Request size code (each code n means 128 << n bytes).
 */
static int hfi1_pcie_caps;
module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
0413
0414
0415
0416
0417
0418
/*
 * tune_pcie_caps - tune PCIe Max Payload and Max Read Request sizes
 * @dd: device data
 *
 * Enables extended tags if disabled, then raises the root complex and
 * endpoint MPS/MRRS toward the common supported maximum, capped by the
 * pcie_caps module parameter.  Bails out quietly (info log only) when
 * the topology is not the expected root-port/endpoint pair.
 */
void tune_pcie_caps(struct hfi1_devdata *dd)
{
	struct pci_dev *parent;
	u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
	u16 rc_mrrs, ep_mrrs, max_mrrs, ectl;
	int ret;

	/*
	 * Turn on extended tags in DevCtl in case the BIOS has turned
	 * them off; a write failure is logged but not fatal.
	 */
	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
	if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
		dd_dev_info(dd, "Enabling PCIe extended tags\n");
		ectl |= PCI_EXP_DEVCTL_EXT_TAG;
		ret = pcie_capability_write_word(dd->pcidev,
						 PCI_EXP_DEVCTL, ectl);
		if (ret)
			dd_dev_info(dd, "Unable to write to PCI config\n");
	}

	parent = dd->pcidev->bus->self;
	/*
	 * Tuning requires access to the upstream component and the
	 * expected root-port topology; otherwise leave BIOS settings.
	 */
	if (!parent) {
		dd_dev_info(dd, "Parent not found\n");
		return;
	}
	if (!pci_is_root_bus(parent->bus)) {
		dd_dev_info(dd, "Parent not root\n");
		return;
	}
	if (!pci_is_pcie(parent)) {
		dd_dev_info(dd, "Parent is not PCI Express capable\n");
		return;
	}
	if (!pci_is_pcie(dd->pcidev)) {
		dd_dev_info(dd, "PCI device is not PCI Express capable\n");
		return;
	}
	/* MPS codes: 128 << code bytes, so ffs(bytes) - 8 recovers code */
	rc_mpss = parent->pcie_mpss;
	rc_mps = ffs(pcie_get_mps(parent)) - 8;

	ep_mpss = dd->pcidev->pcie_mpss;
	ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;

	/* find max payload supported by both root and endpoint */
	if (rc_mpss > ep_mpss)
		rc_mpss = ep_mpss;

	/* cap by the module parameter (low nibble) */
	if (rc_mpss > (hfi1_pcie_caps & 7))
		rc_mpss = hfi1_pcie_caps & 7;
	/* raise the root complex payload if below the allowed maximum */
	if (rc_mpss > rc_mps) {
		rc_mps = rc_mpss;
		pcie_set_mps(parent, 128 << rc_mps);
	}
	/* likewise for the endpoint */
	if (rc_mpss > ep_mps) {
		ep_mps = rc_mpss;
		pcie_set_mps(dd->pcidev, 128 << ep_mps);
	}

	/*
	 * Max Read Request size: start from code 5 (128 << 5 = 4096
	 * bytes), capped by bits 4..7 of the module parameter.
	 */
	max_mrrs = 5;
	if (max_mrrs > ((hfi1_pcie_caps >> 4) & 7))
		max_mrrs = (hfi1_pcie_caps >> 4) & 7;

	max_mrrs = 128 << max_mrrs;
	rc_mrrs = pcie_get_readrq(parent);
	ep_mrrs = pcie_get_readrq(dd->pcidev);

	if (max_mrrs > rc_mrrs) {
		rc_mrrs = max_mrrs;
		pcie_set_readrq(parent, rc_mrrs);
	}
	if (max_mrrs > ep_mrrs) {
		ep_mrrs = max_mrrs;
		pcie_set_readrq(dd->pcidev, ep_mrrs);
	}
}
0507
0508
0509
0510
0511
0512
0513
/*
 * pci_error_detected - AER callback for a detected PCI channel error
 * @pdev: the affected device
 * @state: channel state reported by the PCI core
 *
 * Frozen channels request a reset after disabling the device; a
 * permanent failure marks the chip absent and shuts it down; normal
 * and unknown states are only logged.
 */
static pci_ers_result_t
pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;

	switch (state) {
	case pci_channel_io_normal:
		dd_dev_info(dd, "State Normal, ignoring\n");
		break;

	case pci_channel_io_frozen:
		dd_dev_info(dd, "State Frozen, requesting reset\n");
		pci_disable_device(pdev);
		ret = PCI_ERS_RESULT_NEED_RESET;
		break;

	case pci_channel_io_perm_failure:
		if (dd) {
			dd_dev_info(dd, "State Permanent Failure, disabling\n");
			/* no more register accesses after this point */
			dd->flags &= ~HFI1_PRESENT;
			hfi1_disable_after_error(dd);
		}
		/* else early, or other problem */
		ret = PCI_ERS_RESULT_DISCONNECT;
		break;

	default: /* shouldn't happen */
		dd_dev_info(dd, "HFI1 PCI errors detected (state %d)\n",
			    state);
		break;
	}
	return ret;
}
0549
/*
 * pci_mmio_enabled - AER callback once MMIO access is restored
 * @pdev: the affected device
 *
 * Probes the device by reading the RX words port counter; an all-ones
 * read means the device is still unreachable and a reset is requested.
 */
static pci_ers_result_t
pci_mmio_enabled(struct pci_dev *pdev)
{
	u64 words = 0U;
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;

	if (dd && dd->pport) {
		words = read_port_cntr(dd->pport, C_RX_WORDS, CNTR_INVALID_VL);
		if (words == ~0ULL)
			ret = PCI_ERS_RESULT_NEED_RESET;
		dd_dev_info(dd,
			    "HFI1 mmio_enabled function called, read wordscntr %llx, returning %d\n",
			    words, ret);
	}
	return ret;
}
0567
0568 static pci_ers_result_t
0569 pci_slot_reset(struct pci_dev *pdev)
0570 {
0571 struct hfi1_devdata *dd = pci_get_drvdata(pdev);
0572
0573 dd_dev_info(dd, "HFI1 slot_reset function called, ignored\n");
0574 return PCI_ERS_RESULT_CAN_RECOVER;
0575 }
0576
/*
 * pci_resume - AER callback when normal operation may resume.
 * Re-initializes the device via hfi1_init().
 */
static void
pci_resume(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	dd_dev_info(dd, "HFI1 resume function called\n");

	/*
	 * Running jobs will fail, since it's asynchronous unlike sysfs
	 * requested resets.  Better than doing nothing.
	 * NOTE(review): the second argument to hfi1_init() appears to be
	 * a reinit flag — confirm against its definition.
	 */
	hfi1_init(dd, 1); /* same as re-init after reset */
}
0590
/* AER error-recovery callbacks registered with the PCI core */
const struct pci_error_handlers hfi1_pci_err_handler = {
	.error_detected = pci_error_detected,
	.mmio_enabled = pci_mmio_enabled,
	.slot_reset = pci_slot_reset,
	.resume = pci_resume,
};
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
/* gasket firmware download status bits, one per HFI */
#define DL_STATUS_HFI0 0x1
#define DL_STATUS_HFI1 0x2
#define DL_STATUS_BOTH 0x3

/* gasket firmware download error codes */
#define DL_ERR_NONE		0x0	/* no error */
#define DL_ERR_SWAP_PARITY	0x1	/* parity error in SerDes interrupt */
					/* or response data */
#define DL_ERR_DISABLED	0x2	/* hfi disabled */
#define DL_ERR_SECURITY	0x3	/* security check failed */
#define DL_ERR_SBUS		0x4	/* SBus status error */
#define DL_ERR_XFR_PARITY	0x5	/* parity error during ROM transfer*/

/* gasket block secondary bus reset delay */
#define SBR_DELAY_US 200000	/* 200ms */

static uint pcie_target = 3;
module_param(pcie_target, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");

static uint pcie_force;
module_param(pcie_force, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_force, "Force driver to do a PCIe firmware download even if already at target speed");

static uint pcie_retry = 5;
module_param(pcie_retry, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested speed");

/* equalization preset: 255 means "use the per-chip default" */
#define UNSET_PSET 255
#define DEFAULT_DISCRETE_PSET 2	/* discrete HFI */
#define DEFAULT_MCP_PSET 6	/* MCP HFI */
static uint pcie_pset = UNSET_PSET;
module_param(pcie_pset, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");

static uint pcie_ctle = 3; /* discrete on, integrated on */
module_param(pcie_ctle, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
0646
0647
/* equalization columns (see load_eq_table(): eq[i][PREC] / eq[i][POST]) */
#define PREC 0
#define ATTN 1
#define POST 2

/*
 * Preliminary equalization values for each of the 11 presets (rows
 * correspond to psets 0..10) of a discrete HFI.  Columns: PREC, ATTN,
 * POST.  Values are divided by "div" before use (see load_eq_table()).
 */
static const u8 discrete_preliminary_eq[11][3] = {
	/* prec   attn   post */
	{ 0x00, 0x00, 0x12 },
	{ 0x00, 0x00, 0x0c },
	{ 0x00, 0x00, 0x0f },
	{ 0x00, 0x00, 0x09 },
	{ 0x00, 0x00, 0x00 },
	{ 0x06, 0x00, 0x00 },
	{ 0x09, 0x00, 0x00 },
	{ 0x06, 0x00, 0x0f },
	{ 0x09, 0x00, 0x09 },
	{ 0x0c, 0x00, 0x00 },
	{ 0x00, 0x00, 0x18 },
};

/* same as above, for an integrated HFI */
static const u8 integrated_preliminary_eq[11][3] = {
	/* prec   attn   post */
	{ 0x00, 0x1e, 0x07 },
	{ 0x00, 0x1e, 0x05 },
	{ 0x00, 0x1e, 0x06 },
	{ 0x00, 0x1e, 0x04 },
	{ 0x00, 0x1e, 0x00 },
	{ 0x03, 0x1e, 0x00 },
	{ 0x04, 0x1e, 0x00 },
	{ 0x03, 0x1e, 0x06 },
	{ 0x03, 0x1e, 0x04 },
	{ 0x05, 0x1e, 0x00 },
	{ 0x00, 0x1e, 0x0a },
};

/*
 * Static CTLE tunings per pset, discrete HFI.  Columns are DC, LF, HF,
 * BW (see the pcie_dc/pcie_lf/pcie_hf/pcie_bw usage in
 * do_pcie_gen3_transition()).
 */
static const u8 discrete_ctle_tunings[11][4] = {
	/*   DC     LF     HF     BW */
	{ 0x48, 0x0b, 0x04, 0x04 },
	{ 0x60, 0x05, 0x0f, 0x0a },
	{ 0x50, 0x09, 0x06, 0x06 },
	{ 0x68, 0x05, 0x0f, 0x0a },
	{ 0x80, 0x05, 0x0f, 0x0a },
	{ 0x70, 0x05, 0x0f, 0x0a },
	{ 0x68, 0x05, 0x0f, 0x0a },
	{ 0x38, 0x0f, 0x00, 0x00 },
	{ 0x48, 0x09, 0x06, 0x06 },
	{ 0x60, 0x05, 0x0f, 0x0a },
	{ 0x38, 0x0f, 0x00, 0x00 },
};

/* same as above, for an integrated HFI */
static const u8 integrated_ctle_tunings[11][4] = {
	/*   DC     LF     HF     BW */
	{ 0x38, 0x0f, 0x00, 0x00 },
	{ 0x38, 0x0f, 0x00, 0x00 },
	{ 0x38, 0x0f, 0x00, 0x00 },
	{ 0x38, 0x0f, 0x00, 0x00 },
	{ 0x58, 0x0a, 0x05, 0x05 },
	{ 0x48, 0x0a, 0x05, 0x05 },
	{ 0x40, 0x0a, 0x05, 0x05 },
	{ 0x38, 0x0f, 0x00, 0x00 },
	{ 0x38, 0x0f, 0x00, 0x00 },
	{ 0x38, 0x09, 0x06, 0x06 },
	{ 0x38, 0x0e, 0x01, 0x01 },
};

/* pack pre/cursor/post coefficients into a PCIE_CFG_REG_PL102 value */
#define eq_value(pre, curr, post) \
	((((u32)(pre)) << \
			PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT) \
		| (((u32)(curr)) << PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT) \
		| (((u32)(post)) << \
			PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT))
0721
0722
0723
0724
/*
 * load_eq_table - load the given EQ preset table into the PCIe block
 * @dd: device data
 * @eq: 11-row table of {PREC, ATTN, POST} values
 * @fs: full swing value used to derive the cursor coefficient
 * @div: divisor applied to every table value before use
 *
 * Writes each preset's pre/cursor/post coefficients via
 * PCIE_CFG_REG_PL103 (index) / PL102 (value), reading PL105 after each
 * write to detect coefficient rule violations.
 *
 * Return: 0 on success, -EINVAL if any preset violated the rules or a
 * violation-register read failed.
 */
static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
			 u8 div)
{
	struct pci_dev *pdev = dd->pcidev;
	u32 hit_error = 0;
	u32 violation;
	u32 i;
	u8 c_minus1, c0, c_plus1;
	int ret;

	for (i = 0; i < 11; i++) {
		/* set index */
		pci_write_config_dword(pdev, PCIE_CFG_REG_PL103, i);
		/* write the value: c0 absorbs whatever pre/post do not use */
		c_minus1 = eq[i][PREC] / div;
		c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div);
		c_plus1 = eq[i][POST] / div;
		pci_write_config_dword(pdev, PCIE_CFG_REG_PL102,
				       eq_value(c_minus1, c0, c_plus1));
		/* check if these coefficients violate EQ rules */
		ret = pci_read_config_dword(dd->pcidev,
					    PCIE_CFG_REG_PL105, &violation);
		if (ret) {
			dd_dev_err(dd, "Unable to read from PCI config\n");
			hit_error = 1;
			break;
		}

		if (violation
		    & PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK){
			if (hit_error == 0) {
				dd_dev_err(dd,
					   "Gen3 EQ Table Coefficient rule violations\n");
				dd_dev_err(dd, "         prec   attn   post\n");
			}
			dd_dev_err(dd, "   p%02d:   %02x     %02x     %02x\n",
				   i, (u32)eq[i][0], (u32)eq[i][1],
				   (u32)eq[i][2]);
			dd_dev_err(dd, "            %02x     %02x     %02x\n",
				   (u32)c_minus1, (u32)c0, (u32)c_plus1);
			hit_error = 1;
		}
	}
	if (hit_error)
		return -EINVAL;
	return 0;
}
0772
0773
0774
0775
0776
0777
/*
 * pcie_post_steps - steps to be done after the PCIe firmware download
 * @dd: device data
 *
 * Writes register 0x03 on each PCIe SerDes PCS receiver via the SBus
 * (fast mode for speed), with value 0x00022132.
 * NOTE(review): the exact meaning of value 0x00022132 is defined by the
 * SerDes hardware, not visible here — confirm against the HW spec.
 */
static void pcie_post_steps(struct hfi1_devdata *dd)
{
	int i;

	set_sbus_fast_mode(dd);

	for (i = 0; i < NUM_PCIE_SERDES; i++) {
		sbus_request(dd, pcie_pcs_addrs[dd->hfi1_id][i],
			     0x03, WRITE_SBUS_RECEIVER, 0x00022132);
	}

	clear_sbus_fast_mode(dd);
}
0797
0798
0799
0800
0801
0802
0803
/*
 * trigger_sbr - trigger a secondary bus reset (SBR) on ourselves
 * @dd: device data
 *
 * Refuses to reset unless this device is the only one on its bus,
 * since the reset affects every device below the parent bridge.
 *
 * Return: 0 on success or the pci_bridge_secondary_bus_reset() result;
 * -ENOTTY when the topology makes the reset unsafe/impossible.
 */
static int trigger_sbr(struct hfi1_devdata *dd)
{
	struct pci_dev *dev = dd->pcidev;
	struct pci_dev *pdev;

	/* need a parent bridge to issue the reset through */
	if (!dev->bus->self) {
		dd_dev_err(dd, "%s: no parent device\n", __func__);
		return -ENOTTY;
	}

	/* should not be anyone else on this bus */
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev) {
			dd_dev_err(dd,
				   "%s: another device is on the same bus\n",
				   __func__);
			return -ENOTTY;
		}

	/* the chip is expected to re-train on its own after the reset */
	return pci_bridge_secondary_bus_reset(dev->bus->self);
}
0831
0832
0833
0834
/*
 * write_gasket_interrupt - queue a (code, data) pair at the given slot
 * of the ASIC's PCIe SerDes interrupt list.
 */
static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
				   u16 code, u16 data)
{
	write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
		  (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) |
		   ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
}
0842
0843
0844
0845
/*
 * arm_gasket_logic - tell the gasket logic how to react to the reset
 * @dd: device data
 *
 * Programs ASIC_PCIE_SD_HOST_CMD with this HFI's interrupt command bit,
 * the SerDes broadcast receiver address, SBR mode, and the SBR delay
 * timer, then reads the CSR back to push the write to the chip.
 */
static void arm_gasket_logic(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = (((u64)1 << dd->hfi1_id) <<
	       ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) |
	      ((u64)pcie_serdes_broadcast[dd->hfi1_id] <<
	       ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT |
	       ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK |
	       ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) <<
	       ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT);
	write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
	/* read back to push the write */
	read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
}
0861
0862
0863
0864
0865
0866
/* short aliases for the CCE_PCIE_CTRL field masks/shifts used below */
#define LANE_BUNDLE_MASK CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK
#define LANE_BUNDLE_SHIFT CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT
#define LANE_DELAY_MASK CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK
#define LANE_DELAY_SHIFT CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT
#define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT
#define MARGIN_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_SHIFT
#define MARGIN_G1_G2_OVERWRITE_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK
#define MARGIN_G1_G2_OVERWRITE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT
#define MARGIN_GEN1_GEN2_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK
#define MARGIN_GEN1_GEN2_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT
0877
0878
0879
0880
/*
 * write_xmt_margin - program the transmit margin fields of CCE_PCIE_CTRL
 * @dd: device data
 * @fname: caller name, used only for the debug log line
 *
 * Only acts on PCI_DEVICE_ID_INTEL1 devices: copies the Gen1/Gen2
 * margin settings into the (Gen3) margin fields, with hard-coded
 * overrides on A-step silicon (is_ax()).
 */
static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname)
{
	u64 pcie_ctrl;
	u64 xmt_margin;
	u64 xmt_margin_oe;
	u64 lane_delay;
	u64 lane_bundle;

	pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL);

	if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) {
		/* extract the current Gen1/Gen2 margin configuration */
		xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT)
			      & MARGIN_GEN1_GEN2_MASK;
		xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT)
				 & MARGIN_G1_G2_OVERWRITE_MASK;
		lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK;
		lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT)
			       & LANE_BUNDLE_MASK;

		/*
		 * A-step silicon uses fixed values instead of whatever
		 * is currently programmed.
		 */
		if (is_ax(dd)) {
			xmt_margin = 0x5;
			xmt_margin_oe = 0x1;
			lane_delay = 0xF;
			lane_bundle = 0x0;
		}

		/* apply the same margin to both the Gen1/Gen2 and Gen3 fields */
		pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT)
			    | (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT)
			    | (xmt_margin << MARGIN_SHIFT)
			    | (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT)
			    | (lane_delay << LANE_DELAY_SHIFT)
			    | (lane_bundle << LANE_BUNDLE_SHIFT);

		write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl);
	}

	dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n",
		   fname, pcie_ctrl);
}
0938
0939
0940
0941
0942 int do_pcie_gen3_transition(struct hfi1_devdata *dd)
0943 {
0944 struct pci_dev *parent = dd->pcidev->bus->self;
0945 u64 fw_ctrl;
0946 u64 reg, therm;
0947 u32 reg32, fs, lf;
0948 u32 status, err;
0949 int ret;
0950 int do_retry, retry_count = 0;
0951 int intnum = 0;
0952 uint default_pset;
0953 uint pset = pcie_pset;
0954 u16 target_vector, target_speed;
0955 u16 lnkctl2, vendor;
0956 u8 div;
0957 const u8 (*eq)[3];
0958 const u8 (*ctle_tunings)[4];
0959 uint static_ctle_mode;
0960 int return_error = 0;
0961 u32 target_width;
0962
0963
0964 if (dd->icode != ICODE_RTL_SILICON)
0965 return 0;
0966
0967 if (pcie_target == 1) {
0968 target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT;
0969 target_speed = 2500;
0970 } else if (pcie_target == 2) {
0971 target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT;
0972 target_speed = 5000;
0973 } else if (pcie_target == 3) {
0974 target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT;
0975 target_speed = 8000;
0976 } else {
0977
0978 dd_dev_info(dd, "%s: Skipping PCIe transition\n", __func__);
0979 return 0;
0980 }
0981
0982
0983 if (dd->lbus_speed == target_speed) {
0984 dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__,
0985 pcie_target,
0986 pcie_force ? "re-doing anyway" : "skipping");
0987 if (!pcie_force)
0988 return 0;
0989 }
0990
0991
0992
0993
0994
0995 if (!parent) {
0996 dd_dev_info(dd, "%s: No upstream, Can't do gen3 transition\n",
0997 __func__);
0998 return 0;
0999 }
1000
1001
1002 target_width = dd->lbus_width;
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012 if (pcie_target == 3 && !dd->link_gen3_capable) {
1013 dd_dev_err(dd, "The PCIe link is not Gen3 capable\n");
1014 ret = -ENOSYS;
1015 goto done_no_mutex;
1016 }
1017
1018
1019 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
1020 if (ret) {
1021 dd_dev_err(dd, "%s: unable to acquire SBus resource\n",
1022 __func__);
1023 return ret;
1024 }
1025
1026
1027 therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN);
1028 if (therm) {
1029 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
1030 msleep(100);
1031 dd_dev_info(dd, "%s: Disabled therm polling\n",
1032 __func__);
1033 }
1034
1035 retry:
1036
1037
1038
1039
1040 dd_dev_info(dd, "%s: downloading firmware\n", __func__);
1041 ret = load_pcie_firmware(dd);
1042 if (ret) {
1043
1044 return_error = 1;
1045 goto done;
1046 }
1047
1048
1049 dd_dev_info(dd, "%s: setting PCIe registers\n", __func__);
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059 pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, 0xffff);
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069 reg32 = 0x10ul << PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT;
1070 pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL2, reg32);
1071
1072
1073
1074
1075
1076
1077
1078
1079 reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK;
1080 pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL100, reg32);
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090 if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0) {
1091
1092 fs = 24;
1093 lf = 8;
1094 div = 3;
1095 eq = discrete_preliminary_eq;
1096 default_pset = DEFAULT_DISCRETE_PSET;
1097 ctle_tunings = discrete_ctle_tunings;
1098
1099 static_ctle_mode = pcie_ctle & 0x1;
1100 } else {
1101
1102 fs = 29;
1103 lf = 9;
1104 div = 1;
1105 eq = integrated_preliminary_eq;
1106 default_pset = DEFAULT_MCP_PSET;
1107 ctle_tunings = integrated_ctle_tunings;
1108
1109 static_ctle_mode = (pcie_ctle >> 1) & 0x1;
1110 }
1111 pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
1112 (fs <<
1113 PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) |
1114 (lf <<
1115 PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
1116 ret = load_eq_table(dd, eq, fs, div);
1117 if (ret)
1118 goto done;
1119
1120
1121
1122
1123
1124
1125 if (pset == UNSET_PSET)
1126 pset = default_pset;
1127 if (pset > 10) {
1128 dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n",
1129 __func__, pset, default_pset);
1130 pset = default_pset;
1131 }
1132 dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pset);
1133 pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
1134 ((1 << pset) <<
1135 PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) |
1136 PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK |
1137 PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
1138
1139
1140
1141
1142 dd_dev_info(dd, "%s: doing pcie post steps\n", __func__);
1143 pcie_post_steps(dd);
1144
1145
1146
1147
1148
1149 write_gasket_interrupt(dd, intnum++, 0x0006, 0x0050);
1150
1151
1152 write_gasket_interrupt(dd, intnum++, 0x0026,
1153 0x5b01 | (static_ctle_mode << 3));
1154
1155
1156
1157
1158 write_gasket_interrupt(dd, intnum++, 0x0026, 0x5202);
1159
1160 if (static_ctle_mode) {
1161
1162 u8 pcie_dc, pcie_lf, pcie_hf, pcie_bw;
1163
1164 pcie_dc = ctle_tunings[pset][0];
1165 pcie_lf = ctle_tunings[pset][1];
1166 pcie_hf = ctle_tunings[pset][2];
1167 pcie_bw = ctle_tunings[pset][3];
1168 write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc);
1169 write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf);
1170 write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf);
1171 write_gasket_interrupt(dd, intnum++, 0x0026, 0x5500 | pcie_bw);
1172 }
1173
1174
1175 write_gasket_interrupt(dd, intnum++, 0x0000, 0x0000);
1176
1177
1178
1179
1180 write_xmt_margin(dd, __func__);
1181
1182
1183
1184
1185
1186 dd_dev_info(dd, "%s: clearing ASPM\n", __func__);
1187 aspm_hw_disable_l1(dd);
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205 dd_dev_info(dd, "%s: setting parent target link speed\n", __func__);
1206 ret = pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
1207 if (ret) {
1208 dd_dev_err(dd, "Unable to read from PCI config\n");
1209 return_error = 1;
1210 goto done;
1211 }
1212
1213 dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
1214 (u32)lnkctl2);
1215
1216 if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
1217 lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
1218 lnkctl2 |= target_vector;
1219 dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
1220 (u32)lnkctl2);
1221 ret = pcie_capability_write_word(parent,
1222 PCI_EXP_LNKCTL2, lnkctl2);
1223 if (ret) {
1224 dd_dev_err(dd, "Unable to write to PCI config\n");
1225 return_error = 1;
1226 goto done;
1227 }
1228 } else {
1229 dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
1230 }
1231
1232 dd_dev_info(dd, "%s: setting target link speed\n", __func__);
1233 ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
1234 if (ret) {
1235 dd_dev_err(dd, "Unable to read from PCI config\n");
1236 return_error = 1;
1237 goto done;
1238 }
1239
1240 dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
1241 (u32)lnkctl2);
1242 lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
1243 lnkctl2 |= target_vector;
1244 dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
1245 (u32)lnkctl2);
1246 ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
1247 if (ret) {
1248 dd_dev_err(dd, "Unable to write to PCI config\n");
1249 return_error = 1;
1250 goto done;
1251 }
1252
1253
1254
1255 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
1256 (void)read_csr(dd, CCE_DC_CTRL);
1257
1258 fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL);
1259
1260 dd_dev_info(dd, "%s: arming gasket logic\n", __func__);
1261 arm_gasket_logic(dd);
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276 dd_dev_info(dd, "%s: calling trigger_sbr\n", __func__);
1277 ret = trigger_sbr(dd);
1278 if (ret)
1279 goto done;
1280
1281
1282
1283
1284 ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor);
1285 if (ret) {
1286 dd_dev_info(dd,
1287 "%s: read of VendorID failed after SBR, err %d\n",
1288 __func__, ret);
1289 return_error = 1;
1290 goto done;
1291 }
1292 if (vendor == 0xffff) {
1293 dd_dev_info(dd, "%s: VendorID is all 1s after SBR\n", __func__);
1294 return_error = 1;
1295 ret = -EIO;
1296 goto done;
1297 }
1298
1299
1300 dd_dev_info(dd, "%s: calling restore_pci_variables\n", __func__);
1301 ret = restore_pci_variables(dd);
1302 if (ret) {
1303 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
1304 __func__);
1305 return_error = 1;
1306 goto done;
1307 }
1308
1309
1310 write_csr(dd, MISC_CFG_FW_CTRL, fw_ctrl);
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322 reg = read_csr(dd, ASIC_PCIE_SD_HOST_STATUS);
1323 dd_dev_info(dd, "%s: gasket block status: 0x%llx\n", __func__, reg);
1324 if (reg == ~0ull) {
1325 dd_dev_err(dd, "SBR failed - unable to read from device\n");
1326 return_error = 1;
1327 ret = -ENOSYS;
1328 goto done;
1329 }
1330
1331
1332 write_csr(dd, CCE_DC_CTRL, 0);
1333
1334
1335 setextled(dd, 0);
1336
1337
1338 ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, ®32);
1339 if (ret) {
1340 dd_dev_err(dd, "Unable to read from PCI config\n");
1341 return_error = 1;
1342 goto done;
1343 }
1344
1345 dd_dev_info(dd, "%s: per-lane errors: 0x%x\n", __func__, reg32);
1346
1347
1348 status = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT)
1349 & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK;
1350 if ((status & (1 << dd->hfi1_id)) == 0) {
1351 dd_dev_err(dd,
1352 "%s: gasket status 0x%x, expecting 0x%x\n",
1353 __func__, status, 1 << dd->hfi1_id);
1354 ret = -EIO;
1355 goto done;
1356 }
1357
1358
1359 err = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT)
1360 & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK;
1361 if (err) {
1362 dd_dev_err(dd, "%s: gasket error %d\n", __func__, err);
1363 ret = -EIO;
1364 goto done;
1365 }
1366
1367
1368 update_lbus_info(dd);
1369 dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
1370 dd->lbus_info);
1371
1372 if (dd->lbus_speed != target_speed ||
1373 dd->lbus_width < target_width) {
1374
1375 do_retry = retry_count < pcie_retry;
1376 dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
1377 do_retry ? ", retrying" : "");
1378 retry_count++;
1379 if (do_retry) {
1380 msleep(100);
1381 goto retry;
1382 }
1383 ret = -EIO;
1384 }
1385
1386 done:
1387 if (therm) {
1388 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
1389 msleep(100);
1390 dd_dev_info(dd, "%s: Re-enable therm polling\n",
1391 __func__);
1392 }
1393 release_chip_resource(dd, CR_SBUS);
1394 done_no_mutex:
1395
1396 if (ret && !return_error) {
1397 dd_dev_err(dd, "Proceeding at current speed PCIe speed\n");
1398 ret = 0;
1399 }
1400
1401 dd_dev_info(dd, "%s: done\n", __func__);
1402 return ret;
1403 }