0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/delay.h>
0013 #include <linux/export.h>
0014 #include <linux/gfp.h>
0015 #include <linux/kernel.h>
0016 #include <linux/of.h>
0017 #include <linux/pci.h>
0018 #include <linux/string.h>
0019
0020 #include <asm/pci-bridge.h>
0021 #include <asm/ppc-pci.h>
0022
0023 static int eeh_pe_aux_size = 0;
0024 static LIST_HEAD(eeh_phb_pe);
0025
0026
0027
0028
0029
0030
0031
0032 void eeh_set_pe_aux_size(int size)
0033 {
0034 if (size < 0)
0035 return;
0036
0037 eeh_pe_aux_size = size;
0038 }
0039
0040
0041
0042
0043
0044
0045
0046
/**
 * eeh_pe_alloc - Allocate and minimally initialise a PE
 * @phb: PCI controller the PE belongs to
 * @type: PE type (EEH_PE_PHB, EEH_PE_DEVICE, ...)
 *
 * Allocates a zeroed struct eeh_pe, optionally followed by a
 * cache-line aligned auxiliary data area of eeh_pe_aux_size bytes
 * for platform-private use.
 *
 * Return: the new PE, or NULL on allocation failure.
 */
static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
{
	struct eeh_pe *pe;
	size_t alloc_size;

	/* Reserve room for the platform's aux data after the PE proper */
	alloc_size = sizeof(struct eeh_pe);
	if (eeh_pe_aux_size) {
		alloc_size = ALIGN(alloc_size, cache_line_size());
		alloc_size += eeh_pe_aux_size;
	}

	/* Allocate the PE, zero-initialised */
	pe = kzalloc(alloc_size, GFP_KERNEL);
	if (!pe) return NULL;

	/* Basic initialisation: type, owning PHB, empty lists */
	pe->type = type;
	pe->phb = phb;
	INIT_LIST_HEAD(&pe->child_list);
	INIT_LIST_HEAD(&pe->edevs);

	/*
	 * Point ->data at the cache-line aligned aux area.
	 * NOTE(review): when eeh_pe_aux_size is 0 this points one past
	 * the allocation and must never be dereferenced - confirm no
	 * platform touches ->data without setting an aux size first.
	 */
	pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
				      cache_line_size());
	return pe;
}
0072
0073
0074
0075
0076
0077
0078
0079
0080 int eeh_phb_pe_create(struct pci_controller *phb)
0081 {
0082 struct eeh_pe *pe;
0083
0084
0085 pe = eeh_pe_alloc(phb, EEH_PE_PHB);
0086 if (!pe) {
0087 pr_err("%s: out of memory!\n", __func__);
0088 return -ENOMEM;
0089 }
0090
0091
0092 list_add_tail(&pe->child, &eeh_phb_pe);
0093
0094 pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);
0095
0096 return 0;
0097 }
0098
0099
0100
0101
0102
0103
0104
0105
0106
/**
 * eeh_wait_state - Wait for a PE's state to become available
 * @pe: EEH PE being queried
 * @max_wait: maximum total time to wait, in milliseconds
 *
 * Polls the platform for the PE's state until it reports something
 * other than EEH_STATE_UNAVAILABLE, sleeping between polls for the
 * interval suggested by firmware (clamped to sane bounds below).
 *
 * Return: the PE state from eeh_ops->get_state(), or
 * EEH_STATE_NOT_SUPPORT once @max_wait has been exhausted.
 */
int eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * Bounds for the firmware-suggested poll interval: anything
	 * below 1 second or above 5 minutes is treated as a bogus
	 * value and clamped.
	 */
#define EEH_STATE_MIN_WAIT_TIME (1000)
#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)

	while (1) {
		ret = eeh_ops->get_state(pe, &mwait);

		/* Any definite state (or error code) ends the wait */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warn("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		/* Clamp the firmware-provided interval to sane bounds */
		if (mwait < EEH_STATE_MIN_WAIT_TIME) {
			pr_warn("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warn("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		/* Never sleep longer than the remaining budget */
		msleep(min(mwait, max_wait));
		max_wait -= mwait;
	}
}
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158 struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
0159 {
0160 struct eeh_pe *pe;
0161
0162 list_for_each_entry(pe, &eeh_phb_pe, child) {
0163
0164
0165
0166
0167
0168 if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
0169 return pe;
0170 }
0171
0172 return NULL;
0173 }
0174
0175
0176
0177
0178
0179
0180
0181
0182
/**
 * eeh_pe_next - Retrieve the next PE in a pre-order tree walk
 * @pe: current PE
 * @root: root PE of the traversal
 *
 * Return: the PE after @pe in a depth-first, pre-order walk of the
 * subtree rooted at @root, or NULL when the walk is complete.
 */
struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root)
{
	struct list_head *next = pe->child_list.next;

	/* No children: climb until a not-yet-visited sibling is found */
	if (next == &pe->child_list) {
		while (1) {
			/* Back at the root: traversal is done */
			if (pe == root)
				return NULL;
			next = pe->child.next;
			/* Sibling exists unless we hit the end of the list */
			if (next != &pe->parent->child_list)
				break;
			pe = pe->parent;
		}
	}

	return list_entry(next, struct eeh_pe, child);
}
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212 void *eeh_pe_traverse(struct eeh_pe *root,
0213 eeh_pe_traverse_func fn, void *flag)
0214 {
0215 struct eeh_pe *pe;
0216 void *ret;
0217
0218 eeh_for_each_pe(root, pe) {
0219 ret = fn(pe, flag);
0220 if (ret) return ret;
0221 }
0222
0223 return NULL;
0224 }
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235 void eeh_pe_dev_traverse(struct eeh_pe *root,
0236 eeh_edev_traverse_func fn, void *flag)
0237 {
0238 struct eeh_pe *pe;
0239 struct eeh_dev *edev, *tmp;
0240
0241 if (!root) {
0242 pr_warn("%s: Invalid PE %p\n",
0243 __func__, root);
0244 return;
0245 }
0246
0247
0248 eeh_for_each_pe(root, pe)
0249 eeh_pe_for_each_dev(pe, edev, tmp)
0250 fn(edev, flag);
0251 }
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261 static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
0262 {
0263 int *target_pe = flag;
0264
0265
0266 if (pe->type & EEH_PE_PHB)
0267 return NULL;
0268
0269 if (*target_pe == pe->addr)
0270 return pe;
0271
0272 return NULL;
0273 }
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287 struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no)
0288 {
0289 struct eeh_pe *root = eeh_phb_pe_get(phb);
0290
0291 return eeh_pe_traverse(root, __eeh_pe_get, &pe_no);
0292 }
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
/**
 * eeh_pe_tree_insert - Attach an EEH device to the PE tree
 * @edev: EEH device to insert
 * @new_pe_parent: parent PE to use if a new PE must be created
 *	(NULL means fall back to the PHB PE)
 *
 * If a PE with the device's config address already exists it is
 * reused (and revived if it was marked invalid); otherwise a fresh
 * device or VF PE is allocated and linked under @new_pe_parent.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EEXIST if no
 * PHB PE can be found for the device's controller.
 */
int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent)
{
	struct pci_controller *hose = edev->controller;
	struct eeh_pe *pe, *parent;

	/* Reuse an existing PE with the same address, if any */
	pe = eeh_pe_get(hose, edev->pe_config_addr);
	if (pe) {
		if (pe->type & EEH_PE_INVALID) {
			list_add_tail(&edev->entry, &pe->edevs);
			edev->pe = pe;

			/*
			 * The PE (and possibly its ancestors) was kept
			 * around in invalid state - revive the chain up
			 * to the first already-valid ancestor.
			 */
			parent = pe;
			while (parent) {
				if (!(parent->type & EEH_PE_INVALID))
					break;
				parent->type &= ~EEH_PE_INVALID;
				parent = parent->parent;
			}

			eeh_edev_dbg(edev, "Added to existing PE (parent: PE#%x)\n",
				     pe->parent->addr);
		} else {
			/* Mark the PE as a bus PE since it now holds >1 device */
			pe->type = EEH_PE_BUS;
			edev->pe = pe;

			/* Put the edev onto the existing PE's device list */
			list_add_tail(&edev->entry, &pe->edevs);
			eeh_edev_dbg(edev, "Added to bus PE\n");
		}
		return 0;
	}

	/* No existing PE: allocate a VF PE or a plain device PE */
	if (edev->physfn)
		pe = eeh_pe_alloc(hose, EEH_PE_VF);
	else
		pe = eeh_pe_alloc(hose, EEH_PE_DEVICE);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	pe->addr = edev->pe_config_addr;

	/*
	 * Without an explicit parent, hang the new PE directly off the
	 * PHB PE. Failing to find one means the controller was never
	 * registered - undo the allocation.
	 */
	if (!new_pe_parent) {
		new_pe_parent = eeh_phb_pe_get(hose);
		if (!new_pe_parent) {
			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
				__func__, hose->global_number);
			edev->pe = NULL;
			kfree(pe);
			return -EEXIST;
		}
	}

	/* Link the new PE into the tree under its parent */
	pe->parent = new_pe_parent;
	list_add_tail(&pe->child, &new_pe_parent->child_list);

	/* Finally attach the device to its new PE */
	list_add_tail(&edev->entry, &pe->edevs);
	edev->pe = pe;
	eeh_edev_dbg(edev, "Added to new (parent: PE#%x)\n",
		     new_pe_parent->addr);

	return 0;
}
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
/**
 * eeh_pe_tree_remove - Detach an EEH device and prune empty PEs
 * @edev: EEH device to remove
 *
 * Unhooks @edev from its PE, then walks up the tree freeing PEs that
 * became completely empty - or marking them EEH_PE_INVALID instead of
 * freeing when they must be kept for an in-flight recovery. The walk
 * stops at the PHB PE, which is never removed.
 *
 * Return: 0 on success, -EEXIST if the device has no PE.
 */
int eeh_pe_tree_remove(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent, *child;
	bool keep, recover;
	int cnt;

	pe = eeh_dev_to_pe(edev);
	if (!pe) {
		eeh_edev_dbg(edev, "No PE found for device.\n");
		return -EEXIST;
	}

	/* Detach the device from its PE */
	edev->pe = NULL;
	list_del(&edev->entry);

	/*
	 * Climb towards the PHB PE, cleaning up PEs left empty by the
	 * removal. Note: @parent is captured before the current PE may
	 * be freed below.
	 */
	while (1) {
		parent = pe->parent;

		/* PHB PEs are permanent - stop here */
		if (pe->type & EEH_PE_PHB)
			break;

		/*
		 * EEH_PE_KEEP without EEH_PE_RECOVERING is unexpected:
		 * NOTE(review): the WARN_ON below suggests KEEP is only
		 * supposed to be set while recovery is in progress -
		 * confirm against the recovery core.
		 */
		keep = !!(pe->state & EEH_PE_KEEP);
		recover = !!(pe->state & EEH_PE_RECOVERING);
		WARN_ON(keep && !recover);

		if (!keep && !recover) {
			/* Free the PE outright once it has no devices/children */
			if (list_empty(&pe->edevs) &&
			    list_empty(&pe->child_list)) {
				list_del(&pe->child);
				kfree(pe);
			} else {
				break;
			}
		} else {
			/*
			 * Keep the PE for recovery: when it has no
			 * devices and no valid children, just mark it
			 * invalid so eeh_pe_tree_insert() can revive it
			 * later. Stop climbing if any valid child remains.
			 */
			if (list_empty(&pe->edevs)) {
				cnt = 0;
				list_for_each_entry(child, &pe->child_list, child) {
					if (!(child->type & EEH_PE_INVALID)) {
						cnt++;
						break;
					}
				}

				if (!cnt)
					pe->type |= EEH_PE_INVALID;
				else
					break;
			}
		}

		pe = parent;
	}

	return 0;
}
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490 void eeh_pe_update_time_stamp(struct eeh_pe *pe)
0491 {
0492 time64_t tstamp;
0493
0494 if (!pe) return;
0495
0496 if (pe->freeze_count <= 0) {
0497 pe->freeze_count = 0;
0498 pe->tstamp = ktime_get_seconds();
0499 } else {
0500 tstamp = ktime_get_seconds();
0501 if (tstamp - pe->tstamp > 3600) {
0502 pe->tstamp = tstamp;
0503 pe->freeze_count = 0;
0504 }
0505 }
0506 }
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516 void eeh_pe_state_mark(struct eeh_pe *root, int state)
0517 {
0518 struct eeh_pe *pe;
0519
0520 eeh_for_each_pe(root, pe)
0521 if (!(pe->state & EEH_PE_REMOVED))
0522 pe->state |= state;
0523 }
0524 EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534 void eeh_pe_mark_isolated(struct eeh_pe *root)
0535 {
0536 struct eeh_pe *pe;
0537 struct eeh_dev *edev;
0538 struct pci_dev *pdev;
0539
0540 eeh_pe_state_mark(root, EEH_PE_ISOLATED);
0541 eeh_for_each_pe(root, pe) {
0542 list_for_each_entry(edev, &pe->edevs, entry) {
0543 pdev = eeh_dev_to_pci_dev(edev);
0544 if (pdev)
0545 pdev->error_state = pci_channel_io_frozen;
0546 }
0547
0548 if (pe->state & EEH_PE_CFG_RESTRICTED)
0549 pe->state |= EEH_PE_CFG_BLOCKED;
0550 }
0551 }
0552 EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated);
0553
0554 static void __eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag)
0555 {
0556 int mode = *((int *)flag);
0557
0558 edev->mode |= mode;
0559 }
0560
0561
0562
0563
0564
0565
0566
/**
 * eeh_pe_dev_mode_mark - Set mode bits on every device in a PE subtree
 * @pe: root PE to walk
 * @mode: EEH_DEV_* mode bits to OR into each attached device
 */
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
{
	eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
}
0571
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
/**
 * eeh_pe_state_clear - Clear state bits on a PE subtree
 * @root: root PE of the subtree
 * @state: state bits to clear
 * @include_passed: also clear state on passed-through PEs
 *
 * Clears @state on every PE in the subtree, skipping removed PEs and
 * (unless @include_passed) PEs that eeh_pe_passed() reports as passed
 * through. When EEH_PE_ISOLATED is among the bits being cleared, the
 * check count is reset, attached PCI devices return to the normal
 * channel state, and blocked config access is re-enabled.
 */
void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;

	eeh_for_each_pe(root, pe) {
		/* Keep the state of permanently removed PEs intact */
		if (pe->state & EEH_PE_REMOVED)
			continue;

		if (!include_passed && eeh_pe_passed(pe))
			continue;

		pe->state &= ~state;

		/*
		 * The extra clean-up below only applies when the
		 * isolation flag is being dropped.
		 */
		if (!(state & EEH_PE_ISOLATED))
			continue;

		pe->check_count = 0;
		eeh_pe_for_each_dev(pe, edev, tmp) {
			pdev = eeh_dev_to_pci_dev(edev);
			if (!pdev)
				continue;

			/* Thaw the device's PCI channel */
			pdev->error_state = pci_channel_io_normal;
		}

		/* Unblock PCI config access if it was restricted */
		if (pe->state & EEH_PE_CFG_RESTRICTED)
			pe->state &= ~EEH_PE_CFG_BLOCKED;
	}
}
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
/**
 * eeh_bridge_check_link - Check and bring up the link behind a PCIe port
 * @edev: EEH device for the bridge
 *
 * For root ports and switch downstream ports: verify an adapter is
 * present, power the slot on if it supports power control and is off,
 * clear the link-disable bit, then poll for data link layer link-up
 * (when the port reports that capability).
 */
static void eeh_bridge_check_link(struct eeh_dev *edev)
{
	int cap;
	uint32_t val;
	int timeout = 0;

	/* Only root ports and downstream ports drive a downstream link */
	if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
		return;

	eeh_edev_dbg(edev, "Checking PCIe link...\n");

	/* Slot status: bail out if no adapter is present */
	cap = edev->pcie_cap;
	eeh_ops->read_config(edev, cap + PCI_EXP_SLTSTA, 2, &val);
	if (!(val & PCI_EXP_SLTSTA_PDS)) {
		eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val);
		return;
	}

	/* If the slot has power control and is powered off, power it on */
	eeh_ops->read_config(edev, cap + PCI_EXP_SLTCAP, 2, &val);
	if (val & PCI_EXP_SLTCAP_PCP) {
		eeh_ops->read_config(edev, cap + PCI_EXP_SLTCTL, 2, &val);
		if (val & PCI_EXP_SLTCTL_PCC) {
			eeh_edev_dbg(edev, "In power-off state, power it on ...\n");
			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
			eeh_ops->write_config(edev, cap + PCI_EXP_SLTCTL, 2, val);
			/* Give the slot time to power up */
			msleep(2 * 1000);
		}
	}

	/* Enable the link by clearing the link-disable bit */
	eeh_ops->read_config(edev, cap + PCI_EXP_LNKCTL, 2, &val);
	val &= ~PCI_EXP_LNKCTL_LD;
	eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);

	/* Without DLL link-active reporting we can only wait blindly */
	eeh_ops->read_config(edev, cap + PCI_EXP_LNKCAP, 4, &val);
	if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
		eeh_edev_dbg(edev, "No link reporting capability (0x%08x) \n", val);
		msleep(1000);
		return;
	}

	/* Poll link status for up to 5 seconds, in 20ms steps */
	timeout = 0;
	while (timeout < 5000) {
		msleep(20);
		timeout += 20;

		eeh_ops->read_config(edev, cap + PCI_EXP_LNKSTA, 2, &val);
		if (val & PCI_EXP_LNKSTA_DLLLA)
			break;
	}

	if (val & PCI_EXP_LNKSTA_DLLLA)
		eeh_edev_dbg(edev, "Link up (%s)\n",
			     (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
	else
		eeh_edev_dbg(edev, "Link not ready (0x%04x)\n", val);
}
0698
/*
 * The saved config space is cached as an array of 32-bit words.
 * BYTE_SWAP maps a config-space byte offset to the index of that byte
 * within the word array; SAVED_BYTE fetches the cached byte for a
 * given config register offset.
 */
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
0701
/**
 * eeh_restore_bridge_bars - Restore a bridge's config space from the cache
 * @edev: EEH device for the bridge
 *
 * Writes the saved config words back to a PCI bridge after reset,
 * re-enables memory/master in the COMMAND register, and finally
 * checks the downstream link.
 *
 * NOTE(review): the write order appears deliberate (windows before
 * COMMAND, link check last) - do not reorder.
 */
static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
	int i;

	/*
	 * Words 4-12: device BARs plus the bridge's bus numbers and
	 * I/O / memory window registers (offsets 0x10 - 0x30).
	 */
	for (i = 4; i < 13; i++)
		eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
	/* Word 14: offset 0x38 - expansion ROM base on bridges */
	eeh_ops->write_config(edev, 14*4, 4, edev->config_space[14]);

	/* Cache line size and latency timer are single saved bytes */
	eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
		SAVED_BYTE(PCI_LATENCY_TIMER));

	/* Word 15: offset 0x3c - interrupt line/pin, bridge control */
	eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);

	/* Restore COMMAND, forcing memory decode and bus mastering on */
	eeh_ops->write_config(edev, PCI_COMMAND, 4, edev->config_space[1] |
		PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Make sure the downstream link is trained */
	eeh_bridge_check_link(edev);
}
0730
/**
 * eeh_restore_device_bars - Restore an endpoint's config space from the cache
 * @edev: EEH device for the endpoint
 *
 * Writes the saved BARs and related registers back to a non-bridge
 * device after reset, then restores only the PARITY and SERR bits of
 * the COMMAND register on top of its current value.
 */
static void eeh_restore_device_bars(struct eeh_dev *edev)
{
	int i;
	u32 cmd;

	/* Words 4-9: the six device BARs (offsets 0x10 - 0x24) */
	for (i = 4; i < 10; i++)
		eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
	/* Word 12: offset 0x30 - expansion ROM base */
	eeh_ops->write_config(edev, 12*4, 4, edev->config_space[12]);

	/* Cache line size and latency timer are single saved bytes */
	eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
		SAVED_BYTE(PCI_LATENCY_TIMER));

	/* Word 15: offset 0x3c - max latency, min grant, intr pin/line */
	eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);

	/*
	 * Restore only PARITY and SERR from the saved COMMAND value,
	 * keeping whatever else the device currently has enabled.
	 */
	eeh_ops->read_config(edev, PCI_COMMAND, 4, &cmd);
	if (edev->config_space[1] & PCI_COMMAND_PARITY)
		cmd |= PCI_COMMAND_PARITY;
	else
		cmd &= ~PCI_COMMAND_PARITY;
	if (edev->config_space[1] & PCI_COMMAND_SERR)
		cmd |= PCI_COMMAND_SERR;
	else
		cmd &= ~PCI_COMMAND_SERR;
	eeh_ops->write_config(edev, PCI_COMMAND, 4, cmd);
}
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774 static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag)
0775 {
0776
0777 if (edev->mode & EEH_DEV_BRIDGE)
0778 eeh_restore_bridge_bars(edev);
0779 else
0780 eeh_restore_device_bars(edev);
0781
0782 if (eeh_ops->restore_config)
0783 eeh_ops->restore_config(edev);
0784 }
0785
0786
0787
0788
0789
0790
0791
0792
/**
 * eeh_pe_restore_bars - Restore config space for every device in a PE
 * @pe: root PE of the subtree to restore
 *
 * Walks all devices under @pe and writes back their cached BARs and
 * related config registers, typically after a reset.
 */
void eeh_pe_restore_bars(struct eeh_pe *pe)
{
	/* Per-device restore is handled by the traversal callback */
	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}
0801
0802
0803
0804
0805
0806
0807
0808
0809
0810
0811 const char *eeh_pe_loc_get(struct eeh_pe *pe)
0812 {
0813 struct pci_bus *bus = eeh_pe_bus_get(pe);
0814 struct device_node *dn;
0815 const char *loc = NULL;
0816
0817 while (bus) {
0818 dn = pci_bus_to_OF_node(bus);
0819 if (!dn) {
0820 bus = bus->parent;
0821 continue;
0822 }
0823
0824 if (pci_is_root_bus(bus))
0825 loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
0826 else
0827 loc = of_get_property(dn, "ibm,slot-location-code",
0828 NULL);
0829
0830 if (loc)
0831 return loc;
0832
0833 bus = bus->parent;
0834 }
0835
0836 return "N/A";
0837 }
0838
0839
0840
0841
0842
0843
0844
0845
0846
0847
0848
0849 struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
0850 {
0851 struct eeh_dev *edev;
0852 struct pci_dev *pdev;
0853
0854 if (pe->type & EEH_PE_PHB)
0855 return pe->phb->bus;
0856
0857
0858 if (pe->state & EEH_PE_PRI_BUS)
0859 return pe->bus;
0860
0861
0862 edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
0863 pdev = eeh_dev_to_pci_dev(edev);
0864 if (pdev)
0865 return pdev->bus;
0866
0867 return NULL;
0868 }