0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/delay.h>
0012 #include <linux/sched.h>
0013 #include <linux/init.h>
0014 #include <linux/list.h>
0015 #include <linux/pci.h>
0016 #include <linux/iommu.h>
0017 #include <linux/proc_fs.h>
0018 #include <linux/rbtree.h>
0019 #include <linux/reboot.h>
0020 #include <linux/seq_file.h>
0021 #include <linux/spinlock.h>
0022 #include <linux/export.h>
0023 #include <linux/of.h>
0024 #include <linux/debugfs.h>
0025
0026 #include <linux/atomic.h>
0027 #include <asm/eeh.h>
0028 #include <asm/eeh_event.h>
0029 #include <asm/io.h>
0030 #include <asm/iommu.h>
0031 #include <asm/machdep.h>
0032 #include <asm/ppc-pci.h>
0033 #include <asm/rtas.h>
0034 #include <asm/pte-walk.h>
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076 #define EEH_MAX_FAILS 2100000
0077
0078
0079 #define PCI_BUS_RESET_WAIT_MSEC (5*60*1000)
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091 int eeh_subsystem_flags;
0092 EXPORT_SYMBOL(eeh_subsystem_flags);
0093
0094
0095
0096
0097
0098
0099 u32 eeh_max_freezes = 5;
0100
0101
0102
0103
0104
0105
0106 bool eeh_debugfs_no_recover;
0107
0108
0109 struct eeh_ops *eeh_ops = NULL;
0110
0111
0112 DEFINE_RAW_SPINLOCK(confirm_error_lock);
0113 EXPORT_SYMBOL_GPL(confirm_error_lock);
0114
0115
0116 static DEFINE_MUTEX(eeh_dev_mutex);
0117
0118
0119
0120
0121
0122 #define EEH_PCI_REGS_LOG_LEN 8192
0123 static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
0124
0125
0126
0127
0128
0129
0130 struct eeh_stats {
0131 u64 no_device;
0132 u64 no_dn;
0133 u64 no_cfg_addr;
0134 u64 ignored_check;
0135 u64 total_mmio_ffs;
0136 u64 false_positives;
0137 u64 slot_resets;
0138 };
0139
0140 static struct eeh_stats eeh_stats;
0141
0142 static int __init eeh_setup(char *str)
0143 {
0144 if (!strcmp(str, "off"))
0145 eeh_add_flag(EEH_FORCE_DISABLED);
0146 else if (!strcmp(str, "early_log"))
0147 eeh_add_flag(EEH_EARLY_DUMP_LOG);
0148
0149 return 1;
0150 }
0151 __setup("eeh=", eeh_setup);
0152
0153 void eeh_show_enabled(void)
0154 {
0155 if (eeh_has_flag(EEH_FORCE_DISABLED))
0156 pr_info("EEH: Recovery disabled by kernel parameter.\n");
0157 else if (eeh_has_flag(EEH_ENABLED))
0158 pr_info("EEH: Capable adapter found: recovery enabled.\n");
0159 else
0160 pr_info("EEH: No capable adapters found: recovery disabled.\n");
0161 }
0162
0163
0164
0165
0166
0167
/**
 * eeh_dump_dev_log - Dump the PCI config registers of the given device
 * @edev: EEH device whose config space is captured
 * @buf: destination buffer for the textual log
 * @len: bytes available in @buf
 *
 * Reads the standard header plus bridge, PCI-X, PCI-E and AER register
 * sets through the platform config accessors.  Each value is appended
 * to @buf and mirrored to the kernel log.  Returns the number of bytes
 * written into @buf.
 */
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
	u32 cfg;
	int cap, i;
	int n = 0, l = 0;
	char buffer[128];

	/* Identify the device by domain:bus:dev.fn */
	n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
		       edev->pe->phb->global_number, edev->bdfn >> 8,
		       PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));
	pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
		edev->pe->phb->global_number, edev->bdfn >> 8,
		PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));

	eeh_ops->read_config(edev, PCI_VENDOR_ID, 4, &cfg);
	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
	pr_warn("EEH: PCI device/vendor: %08x\n", cfg);

	eeh_ops->read_config(edev, PCI_COMMAND, 4, &cfg);
	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
	pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);

	/* Gather bridge-specific registers */
	if (edev->mode & EEH_DEV_BRIDGE) {
		eeh_ops->read_config(edev, PCI_SEC_STATUS, 2, &cfg);
		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
		pr_warn("EEH: Bridge secondary status: %04x\n", cfg);

		eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &cfg);
		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
		pr_warn("EEH: Bridge control: %04x\n", cfg);
	}

	/* Dump out the PCI-X command and status regs */
	cap = edev->pcix_cap;
	if (cap) {
		eeh_ops->read_config(edev, cap, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
		pr_warn("EEH: PCI-X cmd: %08x\n", cfg);

		eeh_ops->read_config(edev, cap+4, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
		pr_warn("EEH: PCI-X status: %08x\n", cfg);
	}

	/* If PCI-E capable, dump the PCI-E capability block */
	cap = edev->pcie_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
		pr_warn("EEH: PCI-E capabilities and status follow:\n");

		for (i=0; i<=8; i++) {
			eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			/* Flush and restart the log line every 4 words */
			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}

		}

		pr_warn("%s\n", buffer);
	}

	/* If AER capable, dump the AER register set */
	cap = edev->aer_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e AER:\n");
		pr_warn("EEH: PCI-E AER capability register set follows:\n");

		for (i=0; i<=13; i++) {
			eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			/* Flush and restart the log line every 4 words */
			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E AER %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}
		}

		pr_warn("%s\n", buffer);
	}

	return n;
}
0268
0269 static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag)
0270 {
0271 struct eeh_dev *edev, *tmp;
0272 size_t *plen = flag;
0273
0274 eeh_pe_for_each_dev(pe, edev, tmp)
0275 *plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
0276 EEH_PCI_REGS_LOG_LEN - *plen);
0277
0278 return NULL;
0279 }
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
/**
 * eeh_slot_error_detail - Generate combined log including driver log and error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 *
 * Builds the combined error log: the driver log is captured from the
 * config space of the devices in the PE, and the platform error log is
 * fetched via eeh_ops->get_log().
 */
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
	size_t loglen = 0;

	/*
	 * For a fenced/dead PHB there is no point poking config space,
	 * so the register dump is only attempted for non-PHB PEs.
	 * The IO path may need to be thawed first (for permanent errors,
	 * or when the platform requires it) so that config reads do not
	 * just return 0xFF's.
	 */
	if (!(pe->type & EEH_PE_PHB)) {
		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
		    severity == EEH_LOG_PERM)
			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		/*
		 * Reconfigure bridges so the downstream config space is
		 * reachable, then restore BARs and collect the register
		 * dump — unless config access is still blocked for this PE.
		 */
		eeh_ops->configure_bridge(pe);
		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
			eeh_pe_restore_bars(pe);

			pci_regs_buf[0] = 0;
			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
		}
	}

	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
}
0339
0340
0341
0342
0343
0344
0345
0346
/*
 * Translate an ioremap'd token (virtual address) back to the physical
 * address it maps, by walking the vmalloc page tables.
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	return ppc_find_vmap_phys(token);
}
0351
0352
0353
0354
0355
0356
/**
 * eeh_phb_check_failure - Check if all 1's data is due to a fenced PHB
 * @pe: EEH PE
 *
 * Checks the PHB containing @pe.  If the whole PHB has failed, the PHB
 * PE is isolated and a failure event queued for it.
 *
 * Returns 1 if a PHB failure was detected and reported, 0 if the PHB
 * is healthy or already isolated, negative errno otherwise.
 */
static int eeh_phb_check_failure(struct eeh_pe *pe)
{
	struct eeh_pe *phb_pe;
	unsigned long flags;
	int ret;

	/* PHB-level checking only applies in device probe mode */
	if (!eeh_has_flag(EEH_PROBE_MODE_DEV))
		return -EPERM;

	/* Find the PHB PE */
	phb_pe = eeh_phb_pe_get(pe->phb);
	if (!phb_pe) {
		pr_warn("%s Can't find PE for PHB#%x\n",
			__func__, pe->phb->global_number);
		return -EEXIST;
	}

	/* If the PHB has been isolated already, there is nothing to do */
	eeh_serialize_lock(&flags);
	if (phb_pe->state & EEH_PE_ISOLATED) {
		ret = 0;
		goto out;
	}

	/* Check PHB state: punt on error, not-supported or active state */
	ret = eeh_ops->get_state(phb_pe, NULL);
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
		ret = 0;
		goto out;
	}

	/* Isolate the PHB and send the event with the lock dropped */
	eeh_pe_mark_isolated(phb_pe);
	eeh_serialize_unlock(flags);

	pr_debug("EEH: PHB#%x failure detected, location: %s\n",
		phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
	eeh_send_failure_event(phb_pe);
	return 1;
out:
	eeh_serialize_unlock(flags);
	return ret;
}
0401
0402 static inline const char *eeh_driver_name(struct pci_dev *pdev)
0403 {
0404 if (pdev)
0405 return dev_driver_string(&pdev->dev);
0406
0407 return "<null>";
0408 }
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
/**
 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
 * @edev: eeh device
 *
 * Check for an EEH failure for the given device.  Call this routine
 * if the result of a read was all 0xff's and you want to find out if
 * this is due to an EEH slot freeze.  This routine queries firmware
 * for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dev_check_failure(struct eeh_dev *edev)
{
	int ret;
	unsigned long flags;
	struct device_node *dn;
	struct pci_dev *dev;
	struct eeh_pe *pe, *parent_pe;
	int rc = 0;
	const char *location = NULL;

	eeh_stats.total_mmio_ffs++;

	if (!eeh_enabled())
		return 0;

	if (!edev) {
		eeh_stats.no_dn++;
		return 0;
	}
	dev = eeh_dev_to_pci_dev(edev);
	pe = eeh_dev_to_pe(edev);

	/* A device with no PE attached doesn't want checking */
	if (!pe) {
		eeh_stats.ignored_check++;
		eeh_edev_dbg(edev, "Ignored check\n");
		return 0;
	}

	/*
	 * The whole PHB may already be fenced; take care of that
	 * first, since it supersedes any per-device failure.
	 */
	ret = eeh_phb_check_failure(pe);
	if (ret > 0)
		return ret;

	/*
	 * If the PE has been passed through to a guest, we don't own
	 * the state — let the owner handle any freeze.
	 */
	if (eeh_pe_passed(pe))
		return 0;

	/*
	 * If we already have a pending isolation event for this slot,
	 * we know it's bad already and don't need to re-check.  Do
	 * this under the serialization lock: multiple PCI functions
	 * in one slot may report errors simultaneously, and we only
	 * want one recovery routine running.
	 */
	eeh_serialize_lock(&flags);
	rc = 1;
	if (pe->state & EEH_PE_ISOLATED) {
		pe->check_count++;
		/* Warn once if a driver keeps hammering a frozen device */
		if (pe->check_count == EEH_MAX_FAILS) {
			dn = pci_device_to_OF_node(dev);
			if (dn)
				location = of_get_property(dn, "ibm,loc-code",
						NULL);
			eeh_edev_err(edev, "%d reads ignored for recovering device at location=%s driver=%s\n",
				pe->check_count,
				location ? location : "unknown",
				eeh_driver_name(dev));
			eeh_edev_err(edev, "Might be infinite loop in %s driver\n",
				eeh_driver_name(dev));
			dump_stack();
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure.  This is VERY expensive:
	 * it requires a firmware call on every check.
	 */
	ret = eeh_ops->get_state(pe, NULL);

	/*
	 * Punt (treat as false positive) when the state can't be
	 * retrieved, isn't supported, or shows the PE is healthy.
	 */
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
		eeh_stats.false_positives++;
		pe->false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/*
	 * Corner case: a parent PE may have been frozen as well.
	 * Walk up the PE tree and handle the failure at the
	 * highest frozen (non-PHB) ancestor.
	 */
	parent_pe = pe->parent;
	while (parent_pe) {
		/* Hit the ceiling ? */
		if (parent_pe->type & EEH_PE_PHB)
			break;

		/* Frozen parent PE ? */
		ret = eeh_ops->get_state(parent_pe, NULL);
		if (ret > 0 && !eeh_state_active(ret)) {
			pe = parent_pe;
			pr_err("EEH: Failure of PHB#%x-PE#%x will be handled at parent PHB#%x-PE#%x.\n",
			       pe->phb->global_number, pe->addr,
			       pe->phb->global_number, parent_pe->addr);
		}

		/* Next parent level */
		parent_pe = parent_pe->parent;
	}

	eeh_stats.slot_resets++;

	/*
	 * Avoid repeated reports of this failure, including problems
	 * with other functions on this device and functions under
	 * bridges.
	 */
	eeh_pe_mark_isolated(pe);
	eeh_serialize_unlock(flags);

	/* Queue the event for the recovery thread */
	pr_debug("EEH: %s: Frozen PHB#%x-PE#%x detected\n",
		__func__, pe->phb->global_number, pe->addr);
	eeh_send_failure_event(pe);

	return 1;

dn_unlock:
	eeh_serialize_unlock(flags);
	return rc;
}

EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578
0579 int eeh_check_failure(const volatile void __iomem *token)
0580 {
0581 unsigned long addr;
0582 struct eeh_dev *edev;
0583
0584
0585 addr = eeh_token_to_phys((unsigned long __force) token);
0586 edev = eeh_addr_cache_get_dev(addr);
0587 if (!edev) {
0588 eeh_stats.no_device++;
0589 return 0;
0590 }
0591
0592 return eeh_dev_check_failure(edev);
0593 }
0594 EXPORT_SYMBOL(eeh_check_failure);
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606 int eeh_pci_enable(struct eeh_pe *pe, int function)
0607 {
0608 int active_flag, rc;
0609
0610
0611
0612
0613
0614
0615 switch (function) {
0616 case EEH_OPT_THAW_MMIO:
0617 active_flag = EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED;
0618 break;
0619 case EEH_OPT_THAW_DMA:
0620 active_flag = EEH_STATE_DMA_ACTIVE;
0621 break;
0622 case EEH_OPT_DISABLE:
0623 case EEH_OPT_ENABLE:
0624 case EEH_OPT_FREEZE_PE:
0625 active_flag = 0;
0626 break;
0627 default:
0628 pr_warn("%s: Invalid function %d\n",
0629 __func__, function);
0630 return -EINVAL;
0631 }
0632
0633
0634
0635
0636
0637 if (active_flag) {
0638 rc = eeh_ops->get_state(pe, NULL);
0639 if (rc < 0)
0640 return rc;
0641
0642
0643 if (rc == EEH_STATE_NOT_SUPPORT)
0644 return 0;
0645
0646
0647 if (rc & active_flag)
0648 return 0;
0649 }
0650
0651
0652
0653 rc = eeh_ops->set_option(pe, function);
0654 if (rc)
0655 pr_warn("%s: Unexpected state change %d on "
0656 "PHB#%x-PE#%x, err=%d\n",
0657 __func__, function, pe->phb->global_number,
0658 pe->addr, rc);
0659
0660
0661 if (active_flag) {
0662 rc = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
0663 if (rc < 0)
0664 return rc;
0665
0666 if (rc & active_flag)
0667 return 0;
0668
0669 return -EIO;
0670 }
0671
0672 return rc;
0673 }
0674
/*
 * eeh_pe_dev_traverse() callback: save the config state of every
 * device in the PE except @userdata (the device being reset, whose
 * state is managed by the caller), then quiesce it for the reset.
 */
static void eeh_disable_and_save_dev_state(struct eeh_dev *edev,
					   void *userdata)
{
	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
	struct pci_dev *dev = userdata;

	/*
	 * Skip eeh_devs with no bound pci_dev, and skip the device
	 * that triggered the reset itself.
	 */
	if (!pdev || pdev == dev)
		return;

	/* Ensure we have D0 power state — config space must be reachable */
	pci_set_power_state(pdev, PCI_D0);

	/* Save device state */
	pci_save_state(pdev);

	/*
	 * Disable the device: writing only INTX_DISABLE clears the
	 * IO/memory/bus-master enables as well, keeping the device
	 * quiet across the reset.
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
}
0700
/*
 * eeh_pe_dev_traverse() callback: undo eeh_disable_and_save_dev_state().
 * Restores platform-specific config first, then the saved generic PCI
 * state for every device except @userdata (handled by the caller).
 */
static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
	struct pci_dev *dev = userdata;

	if (!pdev)
		return;

	/* Apply customization from firmware/platform, if any */
	if (eeh_ops->restore_config)
		eeh_ops->restore_config(edev);

	/* The caller should restore state for the specified device */
	if (pdev != dev)
		pci_restore_state(pdev);
}
0717
0718
0719
0720
0721
0722
0723
0724
0725
/**
 * pcibios_set_pcie_reset_state - Set PCI-E reset state
 * @dev: pci device struct
 * @state: reset state to enter
 *
 * Drives the PE containing @dev into or out of reset on behalf of the
 * PCI core.  Asserting a reset isolates the PE, freezes it, saves and
 * quiesces sibling devices, and blocks config access before the
 * platform reset; deasserting reverses the sequence.
 *
 * Return value:
 *	0 if success
 */
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
	struct eeh_pe *pe = eeh_dev_to_pe(edev);

	if (!pe) {
		pr_err("%s: No PE found on PCI device %s\n",
			__func__, pci_name(dev));
		return -EINVAL;
	}

	switch (state) {
	case pcie_deassert_reset:
		/* Leave reset, thaw, unblock config, restore and un-isolate */
		eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
		eeh_unfreeze_pe(pe);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
		break;
	case pcie_hot_reset:
		/* Isolate, freeze, quiesce devices, then hot reset */
		eeh_pe_mark_isolated(pe);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_HOT);
		break;
	case pcie_warm_reset:
		/* Same as hot reset but using a fundamental reset */
		eeh_pe_mark_isolated(pe);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
		break;
	default:
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true);
		return -EINVAL;
	}

	return 0;
}
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782 static void eeh_set_dev_freset(struct eeh_dev *edev, void *flag)
0783 {
0784 struct pci_dev *dev;
0785 unsigned int *freset = (unsigned int *)flag;
0786
0787 dev = eeh_dev_to_pci_dev(edev);
0788 if (dev)
0789 *freset |= dev->needs_freset;
0790 }
0791
0792 static void eeh_pe_refreeze_passed(struct eeh_pe *root)
0793 {
0794 struct eeh_pe *pe;
0795 int state;
0796
0797 eeh_for_each_pe(root, pe) {
0798 if (eeh_pe_passed(pe)) {
0799 state = eeh_ops->get_state(pe, NULL);
0800 if (state &
0801 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED)) {
0802 pr_info("EEH: Passed-through PE PHB#%x-PE#%x was thawed by reset, re-freezing for safety.\n",
0803 pe->phb->global_number, pe->addr);
0804 eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
0805 }
0806 }
0807 }
0808 }
0809
0810
0811
0812
0813
0814
0815
0816
0817
0818
0819
0820
0821
0822
/**
 * eeh_pe_reset_full - Complete a full reset process on the indicated PE
 * @pe: EEH PE
 * @include_passed: include passed-through PEs in the reset?
 *
 * Executes the full reset procedure on a PE: sets the appropriate
 * state flags, performs a hot or fundamental reset, deactivates the
 * reset, and waits for the PE to come back.  Up to three attempts are
 * made before giving up.
 *
 * Returns 0 on success, -EIO if every reset attempt failed, or
 * -ENOTRECOVERABLE if the slot is permanently unavailable.
 */
int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed)
{
	int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
	int type = EEH_RESET_HOT;
	unsigned int freset = 0;
	int i, state = 0, ret;

	/*
	 * Determine the type of reset to perform - hot or fundamental.
	 * A fundamental reset is required if any device under the PE
	 * requests it.
	 */
	eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);

	if (freset)
		type = EEH_RESET_FUNDAMENTAL;

	/* Mark the PE as in reset state and block config space accesses */
	eeh_pe_state_mark(pe, reset_state);

	/* Make three attempts at resetting the bus */
	for (i = 0; i < 3; i++) {
		ret = eeh_pe_reset(pe, type, include_passed);
		if (!ret)
			ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE,
					   include_passed);
		if (ret) {
			ret = -EIO;
			pr_warn("EEH: Failure %d resetting PHB#%x-PE#%x (attempt %d)\n\n",
				state, pe->phb->global_number, pe->addr, i + 1);
			continue;
		}
		if (i)
			pr_warn("EEH: PHB#%x-PE#%x: Successful reset (attempt %d)\n",
				pe->phb->global_number, pe->addr, i + 1);

		/* Wait until the PE is in a functioning state */
		state = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
		if (state < 0) {
			pr_warn("EEH: Unrecoverable slot failure on PHB#%x-PE#%x",
				pe->phb->global_number, pe->addr);
			ret = -ENOTRECOVERABLE;
			break;
		}
		if (eeh_state_active(state))
			break;
		else
			pr_warn("EEH: PHB#%x-PE#%x: Slot inactive after reset: 0x%x (attempt %d)\n",
				pe->phb->global_number, pe->addr, state, i + 1);
	}

	/*
	 * Resetting the PE may have unfrozen child PEs. If those PEs have
	 * been passed through to a guest, re-freeze them.
	 */
	if (!include_passed)
		eeh_pe_refreeze_passed(pe);

	eeh_pe_state_clear(pe, reset_state, true);
	return ret;
}
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
0893 void eeh_save_bars(struct eeh_dev *edev)
0894 {
0895 int i;
0896
0897 if (!edev)
0898 return;
0899
0900 for (i = 0; i < 16; i++)
0901 eeh_ops->read_config(edev, i * 4, 4, &edev->config_space[i]);
0902
0903
0904
0905
0906
0907
0908
0909 if (edev->mode & EEH_DEV_BRIDGE)
0910 edev->config_space[1] |= PCI_COMMAND_MASTER;
0911 }
0912
/*
 * Reboot notifier: clear EEH_ENABLED so no new recovery is started
 * while the system is going down.
 */
static int eeh_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	eeh_clear_flag(EEH_ENABLED);
	return NOTIFY_DONE;
}

static struct notifier_block eeh_reboot_nb = {
	.notifier_call = eeh_reboot_notifier,
};
0923
/*
 * PCI bus notifier: tear down EEH state when a device is removed.
 * Device *addition* is not handled here because at notification time
 * the device's resources have not been set up yet.
 */
static int eeh_device_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	/* Unbind the device from its EEH state before it disappears */
	case BUS_NOTIFY_DEL_DEVICE:
		eeh_remove_device(to_pci_dev(dev));
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block eeh_device_nb = {
	.notifier_call = eeh_device_notifier,
};
0947
0948
0949
0950
0951
0952
0953
/**
 * eeh_init - System-wide EEH initialization
 * @ops: platform hooks implementing the EEH operations
 *
 * Registers the platform ops, installs the reboot and PCI bus
 * notifiers, creates a PE for every PHB, initializes the MMIO address
 * cache and starts the event handling machinery.  Must be called
 * exactly once by the platform.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int eeh_init(struct eeh_ops *ops)
{
	struct pci_controller *hose, *tmp;
	int ret = 0;

	/* Exactly one set of platform ops may ever be registered */
	if (WARN_ON(eeh_ops))
		return -EEXIST;
	if (WARN_ON(!ops))
		return -ENOENT;
	eeh_ops = ops;

	/* Register reboot notifier */
	ret = register_reboot_notifier(&eeh_reboot_nb);
	if (ret) {
		pr_warn("%s: Failed to register reboot notifier (%d)\n",
			__func__, ret);
		return ret;
	}

	ret = bus_register_notifier(&pci_bus_type, &eeh_device_nb);
	if (ret) {
		pr_warn("%s: Failed to register bus notifier (%d)\n",
			__func__, ret);
		return ret;
	}

	/* Initialize a PE for each PHB */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		eeh_phb_pe_create(hose);

	eeh_addr_cache_init();

	/* Initialize EEH event handling */
	return eeh_event_init();
}
0990
0991
0992
0993
0994
0995
0996
0997
/**
 * eeh_probe_device - Perform EEH initialization for the indicated pci device
 * @dev: pci device for which to set up EEH
 *
 * This routine completes EEH initialization for PCI devices, including
 * devices added after system boot (e.g. hotplug, dlpar).
 */
void eeh_probe_device(struct pci_dev *dev)
{
	struct eeh_dev *edev;

	pr_debug("EEH: Adding device %s\n", pci_name(dev));

	/*
	 * pci_dev_to_eeh_dev() can only work if eeh_probe_dev() was
	 * already called for this device.
	 */
	if (WARN_ON_ONCE(pci_dev_to_eeh_dev(dev))) {
		pci_dbg(dev, "Already bound to an eeh_dev!\n");
		return;
	}

	edev = eeh_ops->probe(dev);
	if (!edev) {
		pr_debug("EEH: Adding device failed\n");
		return;
	}

	/*
	 * FIXME: We rely on pcibios_release_device() to remove the
	 * existing EEH state. The release function is only called if
	 * the pci_dev's refcount drops to zero so if something is
	 * keeping a ref to a device (e.g. a filesystem) we need to
	 * remove the old EEH state.
	 *
	 * FIXME: HEY MA, LOOK AT ME, NO LOCKING!
	 */
	if (edev->pdev && edev->pdev != dev) {
		eeh_pe_tree_remove(edev);
		eeh_addr_cache_rmv_dev(edev->pdev);
		eeh_sysfs_remove_device(edev->pdev);

		/*
		 * We have to set the VF PE number to an invalid one, which
		 * is required to plug the VF successfully.  NOTE(review):
		 * the old device's error handlers are skipped from here on.
		 */
		edev->mode |= EEH_DEV_NO_HANDLER;
	}

	/* Bind the new device to its eeh_dev and register caches/sysfs */
	edev->pdev = dev;
	dev->dev.archdata.edev = edev;
	eeh_addr_cache_insert_dev(dev);
	eeh_sysfs_add_device(dev);
}
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
/**
 * eeh_remove_device - Undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from a
 * running system (e.g. by hotplug or dlpar).  It unregisters the PCI
 * device from the EEH subsystem; I/O errors affecting this device
 * will no longer be detected after this call.
 */
void eeh_remove_device(struct pci_dev *dev)
{
	struct eeh_dev *edev;

	if (!dev || !eeh_enabled())
		return;
	edev = pci_dev_to_eeh_dev(dev);

	/* Unregister the device with the EEH/PCI address search system */
	dev_dbg(&dev->dev, "EEH: Removing device\n");

	if (!edev || !edev->pdev || !edev->pe) {
		dev_dbg(&dev->dev, "EEH: Device not referenced!\n");
		return;
	}

	/*
	 * During the hotplug for EEH error recovery, we need the EEH
	 * device attached to the parent PE in order for BAR restore
	 * a bit later.  So we keep it for a while and release it once
	 * the PCI device is hot-added again (see the KEEP test below).
	 */
	edev->pdev = NULL;

	/*
	 * eeh_sysfs_remove_device() uses pci_dev_to_eeh_dev(), so we
	 * must remove sysfs files before clearing dev.archdata.edev.
	 */
	if (edev->mode & EEH_DEV_SYSFS)
		eeh_sysfs_remove_device(dev);

	/*
	 * We're removing from the PCI subsystem, that means
	 * the PCI device driver can't support EEH error recovery
	 * any more — so mark it to skip its error handlers.
	 */
	edev->mode |= EEH_DEV_NO_HANDLER;

	eeh_addr_cache_rmv_dev(dev);

	/*
	 * The flag "in_error" is used to trace EEH devices for VFs
	 * in error state or not. It's set in eeh_report_error(). If
	 * it's not set, eeh_report_{reset,resume}() won't be called
	 * for the VF EEH device.
	 */
	edev->in_error = false;
	dev->dev.archdata.edev = NULL;
	if (!(edev->pe->state & EEH_PE_KEEP))
		eeh_pe_tree_remove(edev);
	else
		edev->mode |= EEH_DEV_DISCONNECTED;
}
1112
1113 int eeh_unfreeze_pe(struct eeh_pe *pe)
1114 {
1115 int ret;
1116
1117 ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
1118 if (ret) {
1119 pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n",
1120 __func__, ret, pe->phb->global_number, pe->addr);
1121 return ret;
1122 }
1123
1124 ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
1125 if (ret) {
1126 pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n",
1127 __func__, ret, pe->phb->global_number, pe->addr);
1128 return ret;
1129 }
1130
1131 return ret;
1132 }
1133
1134
/*
 * Devices that require a full PE reset (rather than a simple thaw)
 * before PE ownership can change hands; see eeh_pe_change_owner().
 * NOTE(review): IDs look like Emulex (0x19a2/0x10df) and Broadcom
 * (0x14e4) adapters — confirm against the PCI ID database.
 */
static struct pci_device_id eeh_reset_ids[] = {
	{ PCI_DEVICE(0x19a2, 0x0710) },
	{ PCI_DEVICE(0x10df, 0xe220) },
	{ PCI_DEVICE(0x14e4, 0x1657) },
	{ 0 }
};
1141
/*
 * Prepare a PE for an ownership change (host <-> guest passthrough).
 * A healthy or unsupported PE needs nothing.  A frozen PE is either
 * fully reset and recovered (for devices on the eeh_reset_ids list)
 * or simply unfrozen and un-isolated.
 */
static int eeh_pe_change_owner(struct eeh_pe *pe)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	struct pci_device_id *id;
	int ret;

	/* Check PE state */
	ret = eeh_ops->get_state(pe, NULL);
	if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
		return 0;

	/* Unfrozen PE, nothing to do */
	if (eeh_state_active(ret))
		return 0;

	/* Frozen PE, check if it needs a PE-level reset */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		/* Match against the quirk table, honouring PCI_ANY_ID */
		for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
			if (id->vendor != PCI_ANY_ID &&
			    id->vendor != pdev->vendor)
				continue;
			if (id->device != PCI_ANY_ID &&
			    id->device != pdev->device)
				continue;
			if (id->subvendor != PCI_ANY_ID &&
			    id->subvendor != pdev->subsystem_vendor)
				continue;
			if (id->subdevice != PCI_ANY_ID &&
			    id->subdevice != pdev->subsystem_device)
				continue;

			return eeh_pe_reset_and_recover(pe);
		}
	}

	/* No quirky device: a plain thaw is sufficient */
	ret = eeh_unfreeze_pe(pe);
	if (!ret)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
	return ret;
}
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
/**
 * eeh_dev_open - Increase count of pass-through devices for the PE
 * @pdev: PCI device
 *
 * Called when a PCI device is passed through to a guest (e.g. VFIO).
 * The PE's pass-through count tells the EEH core that error recovery
 * for the PE is owned by the guest.  The whole operation is done under
 * eeh_dev_mutex so the count and the ownership change stay consistent.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int eeh_dev_open(struct pci_dev *pdev)
{
	struct eeh_dev *edev;
	int ret = -ENODEV;

	mutex_lock(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		goto out;

	/* No EEH device or PE ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe)
		goto out;

	/*
	 * The PE might have been frozen before being handed over; make
	 * sure it's in a workable state (thawed or fully recovered)
	 * before the guest takes ownership.
	 */
	ret = eeh_pe_change_owner(edev->pe);
	if (ret)
		goto out;

	/* Increase PE's pass-through count */
	atomic_inc(&edev->pe->pass_dev_cnt);
	mutex_unlock(&eeh_dev_mutex);

	return 0;
out:
	mutex_unlock(&eeh_dev_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(eeh_dev_open);
1233
1234
1235
1236
1237
1238
1239
1240
1241
/**
 * eeh_dev_release - Decrease count of pass-through devices for the PE
 * @pdev: PCI device
 *
 * Called when a passed-through PCI device is released by the guest.
 * Decrements the PE's pass-through count and, once appropriate, hands
 * error recovery ownership back to the host kernel.
 */
void eeh_dev_release(struct pci_dev *pdev)
{
	struct eeh_dev *edev;

	mutex_lock(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		goto out;

	/* No EEH device, no PE, or PE not actually passed through ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
		goto out;

	/* Decrease PE's pass-through count; warn on underflow */
	WARN_ON(atomic_dec_if_positive(&edev->pe->pass_dev_cnt) < 0);
	eeh_pe_change_owner(edev->pe);
out:
	mutex_unlock(&eeh_dev_mutex);
}
EXPORT_SYMBOL(eeh_dev_release);
1264
1265 #ifdef CONFIG_IOMMU_API
1266
/*
 * iommu_group_for_each_dev() callback: record the first device with an
 * IOMMU mapping into *@data and stop the iteration (non-zero return).
 */
static int dev_has_iommu_table(struct device *dev, void *data)
{
	struct pci_dev **ppdev = data;

	if (!dev)
		return 0;

	if (!device_iommu_mapped(dev))
		return 0;

	*ppdev = to_pci_dev(dev);
	return 1;
}
1282
1283
1284
1285
1286
1287
1288
1289 struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group)
1290 {
1291 struct pci_dev *pdev = NULL;
1292 struct eeh_dev *edev;
1293 int ret;
1294
1295
1296 if (!group)
1297 return NULL;
1298
1299 ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table);
1300 if (!ret || !pdev)
1301 return NULL;
1302
1303
1304 edev = pci_dev_to_eeh_dev(pdev);
1305 if (!edev || !edev->pe)
1306 return NULL;
1307
1308 return edev->pe;
1309 }
1310 EXPORT_SYMBOL_GPL(eeh_iommu_group_to_pe);
1311
1312 #endif
1313
1314
1315
1316
1317
1318
1319
1320
1321
/**
 * eeh_pe_set_option - Set options for the indicated PE
 * @pe: EEH PE
 * @option: requested option (EEH_OPT_*)
 *
 * Used by passthrough owners to control whether the EEH functionality
 * is enabled or disabled on the PE, and to thaw/freeze it.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int eeh_pe_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	/*
	 * EEH functionality could possibly be disabled, just
	 * return error for the case.  And the EEH functionality
	 * isn't expected to be disabled on one specific PE.
	 */
	switch (option) {
	case EEH_OPT_ENABLE:
		if (eeh_enabled()) {
			ret = eeh_pe_change_owner(pe);
			break;
		}
		ret = -EIO;
		break;
	case EEH_OPT_DISABLE:
		/* Per-PE disable is not supported; silently accept */
		break;
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
	case EEH_OPT_FREEZE_PE:
		if (!eeh_ops || !eeh_ops->set_option) {
			ret = -ENOENT;
			break;
		}

		ret = eeh_pci_enable(pe, option);
		break;
	default:
		pr_debug("%s: Option %d out of range (%d, %d)\n",
			__func__, option, EEH_OPT_DISABLE, EEH_OPT_THAW_DMA);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_set_option);
1364
1365
1366
1367
1368
1369
1370
1371
1372 int eeh_pe_get_state(struct eeh_pe *pe)
1373 {
1374 int result, ret = 0;
1375 bool rst_active, dma_en, mmio_en;
1376
1377
1378 if (!pe)
1379 return -ENODEV;
1380
1381 if (!eeh_ops || !eeh_ops->get_state)
1382 return -ENOENT;
1383
1384
1385
1386
1387
1388
1389
1390 if (pe->parent &&
1391 !(pe->state & EEH_PE_REMOVED) &&
1392 (pe->parent->state & (EEH_PE_ISOLATED | EEH_PE_RECOVERING)))
1393 return EEH_PE_STATE_UNAVAIL;
1394
1395 result = eeh_ops->get_state(pe, NULL);
1396 rst_active = !!(result & EEH_STATE_RESET_ACTIVE);
1397 dma_en = !!(result & EEH_STATE_DMA_ENABLED);
1398 mmio_en = !!(result & EEH_STATE_MMIO_ENABLED);
1399
1400 if (rst_active)
1401 ret = EEH_PE_STATE_RESET;
1402 else if (dma_en && mmio_en)
1403 ret = EEH_PE_STATE_NORMAL;
1404 else if (!dma_en && !mmio_en)
1405 ret = EEH_PE_STATE_STOPPED_IO_DMA;
1406 else if (!dma_en && mmio_en)
1407 ret = EEH_PE_STATE_STOPPED_DMA;
1408 else
1409 ret = EEH_PE_STATE_UNAVAIL;
1410
1411 return ret;
1412 }
1413 EXPORT_SYMBOL_GPL(eeh_pe_get_state);
1414
/*
 * After a reset deactivation: restore BARs, re-enable every device in
 * the PE, then thaw the PE (unless it is passed through and
 * @include_passed is false, in which case it is left frozen on
 * purpose for its owner to recover).
 */
static int eeh_pe_reenable_devices(struct eeh_pe *pe, bool include_passed)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	int ret = 0;

	/* Restore config space for those devices */
	eeh_pe_restore_bars(pe);

	/*
	 * Re-enable the devices: resources (MEM/IO decode) and
	 * interrupts were disabled across the reset.
	 */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		ret = pci_reenable_device(pdev);
		if (ret) {
			pr_warn("%s: Failure %d reenabling %s\n",
				__func__, ret, pci_name(pdev));
			return ret;
		}
	}

	/* The PE is still in frozen state; thaw it unless passed through */
	if (include_passed || !eeh_pe_passed(pe)) {
		ret = eeh_unfreeze_pe(pe);
	} else
		pr_info("EEH: Note: Leaving passthrough PHB#%x-PE#%x frozen.\n",
			pe->phb->global_number, pe->addr);
	if (!ret)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, include_passed);
	return ret;
}
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
/**
 * eeh_pe_reset - Issue PE reset according to specified type
 * @pe: EEH PE
 * @option: reset type (EEH_RESET_DEACTIVATE/HOT/FUNDAMENTAL)
 *
 * Performs a single step of the PE reset sequence.  Asserting a reset
 * (HOT/FUNDAMENTAL) freezes the PE and blocks config access first;
 * deactivation unblocks config access and re-enables the devices.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	if (!eeh_ops || !eeh_ops->set_option || !eeh_ops->reset)
		return -ENOENT;

	switch (option) {
	case EEH_RESET_DEACTIVATE:
		ret = eeh_ops->reset(pe, option);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, include_passed);
		if (ret)
			break;

		ret = eeh_pe_reenable_devices(pe, include_passed);
		break;
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/*
		 * Proactively freeze the PE to drop all MMIO access
		 * during reset, which should be banned as it's always
		 * a cause of recursive EEH errors.
		 */
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);

		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		ret = eeh_ops->reset(pe, option);
		break;
	default:
		pr_debug("%s: Unsupported option %d\n",
			__func__, option);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_reset);
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512 int eeh_pe_configure(struct eeh_pe *pe)
1513 {
1514 int ret = 0;
1515
1516
1517 if (!pe)
1518 return -ENODEV;
1519
1520 return ret;
1521 }
1522 EXPORT_SYMBOL_GPL(eeh_pe_configure);
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
/**
 * eeh_pe_inject_err - Injecting the specified PCI error to the indicated PE
 * @pe: the indicated PE
 * @type: error type (EEH_ERR_TYPE_32 or EEH_ERR_TYPE_64)
 * @func: error function (EEH_ERR_FUNC_MIN .. EEH_ERR_FUNC_MAX)
 * @addr: address
 * @mask: address mask
 *
 * Validates the arguments and delegates to the platform error
 * injection hook, used for testing EEH recovery.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
		      unsigned long addr, unsigned long mask)
{
	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	/* Unsupported operation ? */
	if (!eeh_ops || !eeh_ops->err_inject)
		return -ENOENT;

	/* Check on PCI error type */
	if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
		return -EINVAL;

	/* Check on PCI error function */
	if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX)
		return -EINVAL;

	return eeh_ops->err_inject(pe, type, func, addr, mask);
}
EXPORT_SYMBOL_GPL(eeh_pe_inject_err);
1558
1559 #ifdef CONFIG_PROC_FS
1560 static int proc_eeh_show(struct seq_file *m, void *v)
1561 {
1562 if (!eeh_enabled()) {
1563 seq_printf(m, "EEH Subsystem is globally disabled\n");
1564 seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
1565 } else {
1566 seq_printf(m, "EEH Subsystem is enabled\n");
1567 seq_printf(m,
1568 "no device=%llu\n"
1569 "no device node=%llu\n"
1570 "no config address=%llu\n"
1571 "check not wanted=%llu\n"
1572 "eeh_total_mmio_ffs=%llu\n"
1573 "eeh_false_positives=%llu\n"
1574 "eeh_slot_resets=%llu\n",
1575 eeh_stats.no_device,
1576 eeh_stats.no_dn,
1577 eeh_stats.no_cfg_addr,
1578 eeh_stats.ignored_check,
1579 eeh_stats.total_mmio_ffs,
1580 eeh_stats.false_positives,
1581 eeh_stats.slot_resets);
1582 }
1583
1584 return 0;
1585 }
1586 #endif
1587
1588 #ifdef CONFIG_DEBUG_FS
1589
1590
/*
 * Parse a "<domain>:<bus>:<dev>.<fn>" string from a debugfs write and
 * look up the matching pci_dev.  On success the caller owns a
 * reference (pci_get_domain_bus_and_slot) and must pci_dev_put() it.
 * Returns an ERR_PTR on parse or lookup failure.
 */
static struct pci_dev *eeh_debug_lookup_pdev(struct file *filp,
					     const char __user *user_buf,
					     size_t count, loff_t *ppos)
{
	uint32_t domain, bus, dev, fn;
	struct pci_dev *pdev;
	char buf[20];
	int ret;

	memset(buf, 0, sizeof(buf));
	/* sizeof(buf)-1 keeps the buffer NUL-terminated for sscanf */
	ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
	if (!ret)
		return ERR_PTR(-EFAULT);

	ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
	if (ret != 4) {
		pr_err("%s: expected 4 args, got %d\n", __func__, ret);
		return ERR_PTR(-EINVAL);
	}

	/* devfn packs the device number in bits 7:3 and function in 2:0 */
	pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	return pdev;
}
1617
/* debugfs "eeh_enable" write: non-zero re-enables EEH, zero disables. */
static int eeh_enable_dbgfs_set(void *data, u64 val)
{
	if (val)
		eeh_clear_flag(EEH_FORCE_DISABLED);
	else
		eeh_add_flag(EEH_FORCE_DISABLED);

	return 0;
}
1627
1628 static int eeh_enable_dbgfs_get(void *data, u64 *val)
1629 {
1630 if (eeh_enabled())
1631 *val = 0x1ul;
1632 else
1633 *val = 0x0ul;
1634 return 0;
1635 }
1636
1637 DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
1638 eeh_enable_dbgfs_set, "0x%llx\n");
1639
1640 static ssize_t eeh_force_recover_write(struct file *filp,
1641 const char __user *user_buf,
1642 size_t count, loff_t *ppos)
1643 {
1644 struct pci_controller *hose;
1645 uint32_t phbid, pe_no;
1646 struct eeh_pe *pe;
1647 char buf[20];
1648 int ret;
1649
1650 ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
1651 if (!ret)
1652 return -EFAULT;
1653
1654
1655
1656
1657
1658
1659
1660 if (!strncmp(buf, "hwcheck", 7)) {
1661 __eeh_send_failure_event(NULL);
1662 return count;
1663 }
1664
1665 ret = sscanf(buf, "%x:%x", &phbid, &pe_no);
1666 if (ret != 2)
1667 return -EINVAL;
1668
1669 hose = pci_find_controller_for_domain(phbid);
1670 if (!hose)
1671 return -ENODEV;
1672
1673
1674 pe = eeh_pe_get(hose, pe_no);
1675 if (!pe)
1676 return -ENODEV;
1677
1678
1679
1680
1681
1682
1683
1684
1685 __eeh_send_failure_event(pe);
1686
1687 return ret < 0 ? ret : count;
1688 }
1689
/* write-only debugfs file wiring for "eeh_force_recover" */
static const struct file_operations eeh_force_recover_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= eeh_force_recover_write,
};
1695
1696 static ssize_t eeh_debugfs_dev_usage(struct file *filp,
1697 char __user *user_buf,
1698 size_t count, loff_t *ppos)
1699 {
1700 static const char usage[] = "input format: <domain>:<bus>:<dev>.<fn>\n";
1701
1702 return simple_read_from_buffer(user_buf, count, ppos,
1703 usage, sizeof(usage) - 1);
1704 }
1705
1706 static ssize_t eeh_dev_check_write(struct file *filp,
1707 const char __user *user_buf,
1708 size_t count, loff_t *ppos)
1709 {
1710 struct pci_dev *pdev;
1711 struct eeh_dev *edev;
1712 int ret;
1713
1714 pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
1715 if (IS_ERR(pdev))
1716 return PTR_ERR(pdev);
1717
1718 edev = pci_dev_to_eeh_dev(pdev);
1719 if (!edev) {
1720 pci_err(pdev, "No eeh_dev for this device!\n");
1721 pci_dev_put(pdev);
1722 return -ENODEV;
1723 }
1724
1725 ret = eeh_dev_check_failure(edev);
1726 pci_info(pdev, "eeh_dev_check_failure(%s) = %d\n",
1727 pci_name(pdev), ret);
1728
1729 pci_dev_put(pdev);
1730
1731 return count;
1732 }
1733
/* debugfs file wiring for "eeh_dev_check"; reads return the usage string */
static const struct file_operations eeh_dev_check_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= eeh_dev_check_write,
	.read	= eeh_debugfs_dev_usage,
};
1740
1741 static int eeh_debugfs_break_device(struct pci_dev *pdev)
1742 {
1743 struct resource *bar = NULL;
1744 void __iomem *mapped;
1745 u16 old, bit;
1746 int i, pos;
1747
1748
1749 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
1750 struct resource *r = &pdev->resource[i];
1751
1752 if (!r->flags || !r->start)
1753 continue;
1754 if (r->flags & IORESOURCE_IO)
1755 continue;
1756 if (r->flags & IORESOURCE_UNSET)
1757 continue;
1758
1759 bar = r;
1760 break;
1761 }
1762
1763 if (!bar) {
1764 pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
1765 return -ENXIO;
1766 }
1767
1768 pci_err(pdev, "Going to break: %pR\n", bar);
1769
1770 if (pdev->is_virtfn) {
1771 #ifndef CONFIG_PCI_IOV
1772 return -ENXIO;
1773 #else
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784 pdev = pdev->physfn;
1785 if (!pdev)
1786 return -ENXIO;
1787
1788 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1789 pos += PCI_SRIOV_CTRL;
1790 bit = PCI_SRIOV_CTRL_MSE;
1791 #endif
1792 } else {
1793 bit = PCI_COMMAND_MEMORY;
1794 pos = PCI_COMMAND;
1795 }
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817 pci_read_config_word(pdev, pos, &old);
1818
1819 mapped = ioremap(bar->start, PAGE_SIZE);
1820 if (!mapped) {
1821 pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
1822 return -ENXIO;
1823 }
1824
1825 pci_write_config_word(pdev, pos, old & ~bit);
1826 in_8(mapped);
1827 pci_write_config_word(pdev, pos, old);
1828
1829 iounmap(mapped);
1830
1831 return 0;
1832 }
1833
1834 static ssize_t eeh_dev_break_write(struct file *filp,
1835 const char __user *user_buf,
1836 size_t count, loff_t *ppos)
1837 {
1838 struct pci_dev *pdev;
1839 int ret;
1840
1841 pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
1842 if (IS_ERR(pdev))
1843 return PTR_ERR(pdev);
1844
1845 ret = eeh_debugfs_break_device(pdev);
1846 pci_dev_put(pdev);
1847
1848 if (ret < 0)
1849 return ret;
1850
1851 return count;
1852 }
1853
/* debugfs file wiring for "eeh_dev_break"; reads return the usage string */
static const struct file_operations eeh_dev_break_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= eeh_dev_break_write,
	.read	= eeh_debugfs_dev_usage,
};
1860
1861 static ssize_t eeh_dev_can_recover(struct file *filp,
1862 const char __user *user_buf,
1863 size_t count, loff_t *ppos)
1864 {
1865 struct pci_driver *drv;
1866 struct pci_dev *pdev;
1867 size_t ret;
1868
1869 pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
1870 if (IS_ERR(pdev))
1871 return PTR_ERR(pdev);
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886 drv = pci_dev_driver(pdev);
1887 if (drv &&
1888 drv->err_handler &&
1889 drv->err_handler->error_detected &&
1890 drv->err_handler->slot_reset) {
1891 ret = count;
1892 } else {
1893 ret = -EOPNOTSUPP;
1894 }
1895
1896 pci_dev_put(pdev);
1897
1898 return ret;
1899 }
1900
/* debugfs file wiring for "eeh_dev_can_recover"; reads return usage */
static const struct file_operations eeh_dev_can_recover_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= eeh_dev_can_recover,
	.read	= eeh_debugfs_dev_usage,
};
1907
1908 #endif
1909
/*
 * Register the EEH procfs and debugfs interfaces at boot.
 *
 * Only done on platforms that support EEH (pseries and powernv). The
 * debugfs files live under the powerpc arch debugfs directory; the
 * *_unsafe variants are used because the fops handle their own
 * file-removal races. Always returns 0.
 */
static int __init eeh_init_proc(void)
{
	if (machine_is(pseries) || machine_is(powernv)) {
		proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show);
#ifdef CONFIG_DEBUG_FS
		debugfs_create_file_unsafe("eeh_enable", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_enable_dbgfs_ops);
		debugfs_create_u32("eeh_max_freezes", 0600,
				   arch_debugfs_dir, &eeh_max_freezes);
		debugfs_create_bool("eeh_disable_recovery", 0600,
				    arch_debugfs_dir,
				    &eeh_debugfs_no_recover);
		debugfs_create_file_unsafe("eeh_dev_check", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_dev_check_fops);
		debugfs_create_file_unsafe("eeh_dev_break", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_dev_break_fops);
		debugfs_create_file_unsafe("eeh_force_recover", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_force_recover_fops);
		debugfs_create_file_unsafe("eeh_dev_can_recover", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_dev_can_recover_fops);
		eeh_cache_debugfs_init();
#endif
	}

	return 0;
}
__initcall(eeh_init_proc);