// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the AER root port service driver. The driver registers an IRQ
 * handler. When a root port triggers an AER interrupt, the IRQ handler
 * collects root port status and schedules work.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
 *	Andrew Patterson <andrew.patterson@hp.com>
 */

#define pr_fmt(fmt) "AER: " fmt
#define dev_fmt pr_fmt

#include <linux/bitops.h>
#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <acpi/apei.h>
#include <ras/ras_event.h>

#include "../pci.h"
#include "portdrv.h"

#define AER_ERROR_SOURCES_MAX		128

#define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS	27	/* as per PCI_ERR_UNCOR_STATUS */

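/*
 * One logged error source, mirroring the Root Port's Root Error Status
 * register (status) and Error Source Identification register (id).
 */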
struct aer_err_source {
	unsigned int status;
	unsigned int id;
};

struct aer_rpc {
	struct pci_dev *rpd;		/* Root Port device */
	DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};

/* AER stats for the device */
struct aer_stats {

	/*
	 * Fields for all AER capable devices. They indicate the errors
	 * "as seen by this device". Note that this may mean that if an
	 * endpoint is causing problems, the AER counters may increment
	 * at its link partner (e.g. root port) because the errors will be
	 * "seen" by the link partner and not the problematic endpoint
	 * itself (which may report all counters as 0 as it never saw any
	 * problems).
	 */
	/* Counters for different type of correctable errors */
	u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
	/* Counters for different type of fatal uncorrectable errors */
	u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
	/* Counters for different type of nonfatal uncorrectable errors */
	u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
	/* Total number of ERR_COR sent by this device */
	u64 dev_total_cor_errs;
	/* Total number of ERR_FATAL sent by this device */
	u64 dev_total_fatal_errs;
	/* Total number of ERR_NONFATAL sent by this device */
	u64 dev_total_nonfatal_errs;

	/*
	 * Fields for Root Ports & Root Complex Event Collectors only. These
	 * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
	 * messages received by the Root Port / Event Collector, INCLUDING the
	 * ones that are generated internally (by the Root Port itself).
	 */
	u64 rootport_total_cor_errs;
	u64 rootport_total_fatal_errs;
	u64 rootport_total_nonfatal_errs;
};

#define AER_LOG_TLP_MASKS		(PCI_ERR_UNC_POISON_TLP|	\
					PCI_ERR_UNC_ECRC|		\
					PCI_ERR_UNC_UNSUP|		\
					PCI_ERR_UNC_COMP_ABORT|		\
					PCI_ERR_UNC_UNX_COMP|		\
					PCI_ERR_UNC_MALF_TLP)

#define SYSTEM_ERROR_INTR_ON_MESG_MASK	(PCI_EXP_RTCTL_SECEE|	\
					PCI_EXP_RTCTL_SENFEE|	\
					PCI_EXP_RTCTL_SEFEE)
#define ROOT_PORT_INTR_ON_MESG_MASK	(PCI_ERR_ROOT_CMD_COR_EN|	\
					PCI_ERR_ROOT_CMD_NONFATAL_EN|	\
					PCI_ERR_ROOT_CMD_FATAL_EN)
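/*
 * PCI_ERR_ROOT_ERR_SRC holds the Requester ID of the last ERR_COR
 * reporter in its low 16 bits and of the last ERR_FATAL/NONFATAL
 * reporter in its high 16 bits.
 */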
#define ERR_COR_ID(d)			(d & 0xffff)
#define ERR_UNCOR_ID(d)			(d >> 16)

#define AER_ERR_STATUS_MASK		(PCI_ERR_ROOT_UNCOR_RCV |	\
					PCI_ERR_ROOT_COR_RCV |		\
					PCI_ERR_ROOT_MULTI_COR_RCV |	\
					PCI_ERR_ROOT_MULTI_UNCOR_RCV)

static int pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);

void pci_no_aer(void)
{
	pcie_aer_disable = 1;
}

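/*
 * The AER port service relies on interrupt delivery that is only set up
 * when MSI is available, so native AER is reported unavailable when MSI
 * is disabled (e.g. "pci=nomsi") as well as after "pci=noaer".
 */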
bool pci_aer_available(void)
{
	return !pcie_aer_disable && pci_msi_enabled();
}

#ifdef CONFIG_PCIE_ECRC

#define ECRC_POLICY_DEFAULT 0		/* ECRC set by BIOS */
#define ECRC_POLICY_OFF     1		/* ECRC off for performance */
#define ECRC_POLICY_ON      2		/* ECRC on for data integrity */

static int ecrc_policy = ECRC_POLICY_DEFAULT;

static const char * const ecrc_policy_str[] = {
	[ECRC_POLICY_DEFAULT] = "bios",
	[ECRC_POLICY_OFF] = "off",
	[ECRC_POLICY_ON] = "on"
};

/**
 * enable_ecrc_checking - enable PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
static int enable_ecrc_checking(struct pci_dev *dev)
{
	int aer = dev->aer_cap;
	u32 reg32;

	if (!aer)
		return -ENODEV;

	pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
	if (reg32 & PCI_ERR_CAP_ECRC_GENC)
		reg32 |= PCI_ERR_CAP_ECRC_GENE;
	if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
		reg32 |= PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);

	return 0;
}

/**
 * disable_ecrc_checking - disable PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
static int disable_ecrc_checking(struct pci_dev *dev)
{
	int aer = dev->aer_cap;
	u32 reg32;

	if (!aer)
		return -ENODEV;

	pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
	reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
	pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);

	return 0;
}

/**
 * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Apply the global ECRC policy to @dev.
 */
void pcie_set_ecrc_checking(struct pci_dev *dev)
{
	switch (ecrc_policy) {
	case ECRC_POLICY_DEFAULT:
		return;
	case ECRC_POLICY_OFF:
		disable_ecrc_checking(dev);
		break;
	case ECRC_POLICY_ON:
		enable_ecrc_checking(dev);
		break;
	default:
		return;
	}
}

/**
 * pcie_ecrc_get_policy - parse the "ecrc=" kernel command-line option
 * @str: ECRC policy string ("bios", "off", or "on")
 */
void pcie_ecrc_get_policy(char *str)
{
	int i;

	i = match_string(ecrc_policy_str, ARRAY_SIZE(ecrc_policy_str), str);
	if (i < 0)
		return;

	ecrc_policy = i;
}
#endif	/* CONFIG_PCIE_ECRC */

#define PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)

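/*
 * "Native" means the OS may access the AER registers itself: either the
 * platform granted the OS control of AER, or the user forced it with
 * "pcie_ports=native".
 */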
int pcie_aer_is_native(struct pci_dev *dev)
{
	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);

	if (!dev->aer_cap)
		return 0;

	return pcie_ports_native || host->native_aer;
}

int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	int rc;

	if (!pcie_aer_is_native(dev))
		return -EIO;

	rc = pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
	return pcibios_err_to_errno(rc);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);

int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	int rc;

	if (!pcie_aer_is_native(dev))
		return -EIO;

	rc = pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
	return pcibios_err_to_errno(rc);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);

int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
	int aer = dev->aer_cap;
	u32 status, sev;

	if (!pcie_aer_is_native(dev))
		return -EIO;

	/* Clear status bits for ERR_NONFATAL errors only */
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
	status &= ~sev;
	if (status)
		pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_aer_clear_nonfatal_status);

void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
	int aer = dev->aer_cap;
	u32 status, sev;

	if (!pcie_aer_is_native(dev))
		return;

	/* Clear status bits for ERR_FATAL errors only */
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
	status &= sev;
	if (status)
		pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * pci_aer_raw_clear_status - Clear AER error registers.
 * @dev: the PCI device
 *
 * Clear the AER error status registers unconditionally, regardless of
 * whether they're owned by firmware or the OS.
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_aer_raw_clear_status(struct pci_dev *dev)
{
	int aer = dev->aer_cap;
	u32 status;
	int port_type;

	if (!aer)
		return -EIO;

	port_type = pci_pcie_type(dev);
	if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
	    port_type == PCI_EXP_TYPE_RC_EC) {
		pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &status);
		pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, status);
	}

	pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
	pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS, status);

	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}

int pci_aer_clear_status(struct pci_dev *dev)
{
	if (!pcie_aer_is_native(dev))
		return -EIO;

	return pci_aer_raw_clear_status(dev);
}

void pci_save_aer_state(struct pci_dev *dev)
{
	int aer = dev->aer_cap;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!aer)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
	if (!save_state)
		return;

	cap = &save_state->cap.data[0];
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, cap++);
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, cap++);
	pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, cap++);
	pci_read_config_dword(dev, aer + PCI_ERR_CAP, cap++);
	if (pcie_cap_has_rtctl(dev))
		pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, cap++);
}

void pci_restore_aer_state(struct pci_dev *dev)
{
	int aer = dev->aer_cap;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!aer)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
	if (!save_state)
		return;

	cap = &save_state->cap.data[0];
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, *cap++);
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, *cap++);
	pci_write_config_dword(dev, aer + PCI_ERR_COR_MASK, *cap++);
	pci_write_config_dword(dev, aer + PCI_ERR_CAP, *cap++);
	if (pcie_cap_has_rtctl(dev))
		pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, *cap++);
}

void pci_aer_init(struct pci_dev *dev)
{
	int n;

	dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!dev->aer_cap)
		return;

	dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);

	/*
	 * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
	 * PCI_ERR_COR_MASK, and PCI_ERR_CAP.  Root Ports and Root Complex
	 * Event Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0,
	 * sec 7.8.4.9).
	 */
	n = pcie_cap_has_rtctl(dev) ? 5 : 4;
	pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);

	pci_aer_clear_status(dev);

	if (pci_aer_available())
		pci_enable_pcie_error_reporting(dev);

	pcie_set_ecrc_checking(dev);
}

void pci_aer_exit(struct pci_dev *dev)
{
	kfree(dev->aer_stats);
	dev->aer_stats = NULL;
}

#define AER_AGENT_RECEIVER		0
#define AER_AGENT_REQUESTER		1
#define AER_AGENT_COMPLETER		2
#define AER_AGENT_TRANSMITTER		3

#define AER_AGENT_REQUESTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
					0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
#define AER_AGENT_COMPLETER_MASK(t)	((t == AER_CORRECTABLE) ?	\
					0 : PCI_ERR_UNC_COMP_ABORT)
#define AER_AGENT_TRANSMITTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
					(PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)

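/*
 * Classify the reporting agent: given severity t and status bits e, pick
 * the most specific agent whose characteristic error bits are set,
 * defaulting to "Receiver".
 */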
#define AER_GET_AGENT(t, e)						\
	((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER :	\
	(e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER :	\
	(e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER :	\
	AER_AGENT_RECEIVER)

#define AER_PHYSICAL_LAYER_ERROR	0
#define AER_DATA_LINK_LAYER_ERROR	1
#define AER_TRANSACTION_LAYER_ERROR	2

#define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
					PCI_ERR_COR_RCVR : 0)
#define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
					(PCI_ERR_COR_BAD_TLP|		\
					PCI_ERR_COR_BAD_DLLP|		\
					PCI_ERR_COR_REP_ROLL|		\
					PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)

#define AER_GET_LAYER_ERROR(t, e)					\
	((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
	(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
	AER_TRANSACTION_LAYER_ERROR)

/*
 * AER error strings
 */
static const char *aer_error_severity_string[] = {
	"Uncorrected (Non-Fatal)",
	"Uncorrected (Fatal)",
	"Corrected"
};

static const char *aer_error_layer[] = {
	"Physical Layer",
	"Data Link Layer",
	"Transaction Layer"
};

static const char *aer_correctable_error_string[] = {
	"RxErr",			/* Bit Position 0	*/
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"BadTLP",			/* Bit Position 6	*/
	"BadDLLP",			/* Bit Position 7	*/
	"Rollover",			/* Bit Position 8	*/
	NULL,
	NULL,
	NULL,
	"Timeout",			/* Bit Position 12	*/
	"NonFatalErr",			/* Bit Position 13	*/
	"CorrIntErr",			/* Bit Position 14	*/
	"HeaderOF",			/* Bit Position 15	*/
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};

static const char *aer_uncorrectable_error_string[] = {
	"Undefined",			/* Bit Position 0	*/
	NULL,
	NULL,
	NULL,
	"DLP",				/* Bit Position 4	*/
	"SDES",				/* Bit Position 5	*/
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"TLP",				/* Bit Position 12	*/
	"FCP",				/* Bit Position 13	*/
	"CmpltTO",			/* Bit Position 14	*/
	"CmpltAbrt",			/* Bit Position 15	*/
	"UnxCmplt",			/* Bit Position 16	*/
	"RxOF",				/* Bit Position 17	*/
	"MalfTLP",			/* Bit Position 18	*/
	"ECRC",				/* Bit Position 19	*/
	"UnsupReq",			/* Bit Position 20	*/
	"ACSViol",			/* Bit Position 21	*/
	"UncorrIntErr",			/* Bit Position 22	*/
	"BlockedTLP",			/* Bit Position 23	*/
	"AtomicOpBlocked",		/* Bit Position 24	*/
	"TLPBlockedErr",		/* Bit Position 25	*/
	"PoisonTLPBlocked",		/* Bit Position 26	*/
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};

static const char *aer_agent_string[] = {
	"Receiver ID",
	"Requester ID",
	"Completer ID",
	"Transmitter ID"
};

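/*
 * Generate a sysfs "show" method that dumps one per-device AER stats
 * array: one "<error name> <count>" line per known bit, a raw bit number
 * for unknown bits with nonzero counts, plus a TOTAL_* summary line.
 */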
#define aer_stats_dev_attr(name, stats_array, strings_array,		\
			   total_string, total_field)			\
	static ssize_t							\
	name##_show(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	unsigned int i;							\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	u64 *stats = pdev->aer_stats->stats_array;			\
	size_t len = 0;							\
									\
	for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
		if (strings_array[i])					\
			len += sysfs_emit_at(buf, len, "%s %llu\n",	\
					     strings_array[i],		\
					     stats[i]);			\
		else if (stats[i])					\
			len += sysfs_emit_at(buf, len,			\
					     #stats_array "_bit[%d] %llu\n",\
					     i, stats[i]);		\
	}								\
	len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string,	\
			     pdev->aer_stats->total_field);		\
	return len;							\
}									\
static DEVICE_ATTR_RO(name)
0561
0562 aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
0563 aer_correctable_error_string, "ERR_COR",
0564 dev_total_cor_errs);
0565 aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
0566 aer_uncorrectable_error_string, "ERR_FATAL",
0567 dev_total_fatal_errs);
0568 aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
0569 aer_uncorrectable_error_string, "ERR_NONFATAL",
0570 dev_total_nonfatal_errs);
0571
0572 #define aer_stats_rootport_attr(name, field) \
0573 static ssize_t \
0574 name##_show(struct device *dev, struct device_attribute *attr, \
0575 char *buf) \
0576 { \
0577 struct pci_dev *pdev = to_pci_dev(dev); \
0578 return sysfs_emit(buf, "%llu\n", pdev->aer_stats->field); \
0579 } \
0580 static DEVICE_ATTR_RO(name)
0581
0582 aer_stats_rootport_attr(aer_rootport_total_err_cor,
0583 rootport_total_cor_errs);
0584 aer_stats_rootport_attr(aer_rootport_total_err_fatal,
0585 rootport_total_fatal_errs);
0586 aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
0587 rootport_total_nonfatal_errs);
0588
0589 static struct attribute *aer_stats_attrs[] __ro_after_init = {
0590 &dev_attr_aer_dev_correctable.attr,
0591 &dev_attr_aer_dev_fatal.attr,
0592 &dev_attr_aer_dev_nonfatal.attr,
0593 &dev_attr_aer_rootport_total_err_cor.attr,
0594 &dev_attr_aer_rootport_total_err_fatal.attr,
0595 &dev_attr_aer_rootport_total_err_nonfatal.attr,
0596 NULL
0597 };
0598
0599 static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
0600 struct attribute *a, int n)
0601 {
0602 struct device *dev = kobj_to_dev(kobj);
0603 struct pci_dev *pdev = to_pci_dev(dev);
0604
0605 if (!pdev->aer_stats)
0606 return 0;
0607
0608 if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
0609 a == &dev_attr_aer_rootport_total_err_fatal.attr ||
0610 a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
0611 ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
0612 (pci_pcie_type(pdev) != PCI_EXP_TYPE_RC_EC)))
0613 return 0;
0614
0615 return a->mode;
0616 }
0617
0618 const struct attribute_group aer_stats_attr_group = {
0619 .attrs = aer_stats_attrs,
0620 .is_visible = aer_stats_attrs_are_visible,
0621 };

static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
				   struct aer_err_info *info)
{
	unsigned long status = info->status & ~info->mask;
	int i, max = -1;
	u64 *counter = NULL;
	struct aer_stats *aer_stats = pdev->aer_stats;

	if (!aer_stats)
		return;

	switch (info->severity) {
	case AER_CORRECTABLE:
		aer_stats->dev_total_cor_errs++;
		counter = &aer_stats->dev_cor_errs[0];
		max = AER_MAX_TYPEOF_COR_ERRS;
		break;
	case AER_NONFATAL:
		aer_stats->dev_total_nonfatal_errs++;
		counter = &aer_stats->dev_nonfatal_errs[0];
		max = AER_MAX_TYPEOF_UNCOR_ERRS;
		break;
	case AER_FATAL:
		aer_stats->dev_total_fatal_errs++;
		counter = &aer_stats->dev_fatal_errs[0];
		max = AER_MAX_TYPEOF_UNCOR_ERRS;
		break;
	}

	for_each_set_bit(i, &status, max)
		counter[i]++;
}

static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
					struct aer_err_source *e_src)
{
	struct aer_stats *aer_stats = pdev->aer_stats;

	if (!aer_stats)
		return;

	if (e_src->status & PCI_ERR_ROOT_COR_RCV)
		aer_stats->rootport_total_cor_errs++;

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			aer_stats->rootport_total_fatal_errs++;
		else
			aer_stats->rootport_total_nonfatal_errs++;
	}
}

static void __print_tlp_header(struct pci_dev *dev,
			       struct aer_header_log_regs *t)
{
	pci_err(dev, "  TLP Header: %08x %08x %08x %08x\n",
		t->dw0, t->dw1, t->dw2, t->dw3);
}

static void __aer_print_error(struct pci_dev *dev,
			      struct aer_err_info *info)
{
	const char **strings;
	unsigned long status = info->status & ~info->mask;
	const char *level, *errmsg;
	int i;

	if (info->severity == AER_CORRECTABLE) {
		strings = aer_correctable_error_string;
		level = KERN_WARNING;
	} else {
		strings = aer_uncorrectable_error_string;
		level = KERN_ERR;
	}

	for_each_set_bit(i, &status, 32) {
		errmsg = strings[i];
		if (!errmsg)
			errmsg = "Unknown Error Bit";

		pci_printk(level, dev, "   [%2d] %-22s%s\n", i, errmsg,
			   info->first_error == i ? " (First)" : "");
	}
	pci_dev_aer_stats_incr(dev, info);
}

void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
	int layer, agent;
	int id = ((dev->bus->number << 8) | dev->devfn);
	const char *level;

	if (!info->status) {
		pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
			aer_error_severity_string[info->severity]);
		goto out;
	}

	layer = AER_GET_LAYER_ERROR(info->severity, info->status);
	agent = AER_GET_AGENT(info->severity, info->status);

	level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;

	pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
		   aer_error_severity_string[info->severity],
		   aer_error_layer[layer], aer_agent_string[agent]);

	pci_printk(level, dev, "  device [%04x:%04x] error status/mask=%08x/%08x\n",
		   dev->vendor, dev->device, info->status, info->mask);

	__aer_print_error(dev, info);

	if (info->tlp_header_valid)
		__print_tlp_header(dev, &info->tlp);

out:
	if (info->id && info->error_dev_num > 1 && info->id == id)
		pci_err(dev, "  Error of this Agent is reported first\n");

	trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
			info->severity, info->tlp_header_valid, &info->tlp);
}

static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
{
	u8 bus = info->id >> 8;
	u8 devfn = info->id & 0xff;

	pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
		 info->multi_error_valid ? "Multiple " : "",
		 aer_error_severity_string[info->severity],
		 pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
		 PCI_FUNC(devfn));
}

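/*
 * On ACPI platforms the firmware may report AER errors through APEI/GHES
 * as CPER records instead of through the native Root Port interrupt; the
 * helpers below translate and log those records.
 */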
#ifdef CONFIG_ACPI_APEI_PCIEAER
int cper_severity_to_aer(int cper_severity)
{
	switch (cper_severity) {
	case CPER_SEV_RECOVERABLE:
		return AER_NONFATAL;
	case CPER_SEV_FATAL:
		return AER_FATAL;
	default:
		return AER_CORRECTABLE;
	}
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);

void cper_print_aer(struct pci_dev *dev, int aer_severity,
		    struct aer_capability_regs *aer)
{
	int layer, agent, tlp_header_valid = 0;
	u32 status, mask;
	struct aer_err_info info;

	if (aer_severity == AER_CORRECTABLE) {
		status = aer->cor_status;
		mask = aer->cor_mask;
	} else {
		status = aer->uncor_status;
		mask = aer->uncor_mask;
		tlp_header_valid = status & AER_LOG_TLP_MASKS;
	}

	layer = AER_GET_LAYER_ERROR(aer_severity, status);
	agent = AER_GET_AGENT(aer_severity, status);

	memset(&info, 0, sizeof(info));
	info.severity = aer_severity;
	info.status = status;
	info.mask = mask;
	info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);

	pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
	__aer_print_error(dev, &info);
	pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
		aer_error_layer[layer], aer_agent_string[agent]);

	if (aer_severity != AER_CORRECTABLE)
		pci_err(dev, "aer_uncor_severity: 0x%08x\n",
			aer->uncor_severity);

	if (tlp_header_valid)
		__print_tlp_header(dev, &aer->header_log);

	trace_aer_event(dev_name(&dev->dev), (status & ~mask),
			aer_severity, tlp_header_valid, &aer->header_log);
}
#endif

/**
 * add_error_device - list device to be handled
 * @e_info: pointer to error info
 * @dev: pointer to pci_dev to be added
 */
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
		e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
		e_info->error_dev_num++;
		return 0;
	}
	return -ENOSPC;
}

/**
 * is_error_source - check whether the device is source of reported error
 * @dev: pointer to pci_dev to be checked
 * @e_info: pointer to reported error info
 */
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
	int aer = dev->aer_cap;
	u32 status, mask;
	u16 reg16;

	/*
	 * When the bus ID is equal to 0, it might be a bad ID
	 * reported by the Root Port.
	 */
	if ((PCI_BUS_NUM(e_info->id) != 0) &&
	    !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
		/* Device ID match? */
		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
			return true;

		/* Continue ID comparing only if there are multiple errors */
		if (!e_info->multi_error_valid)
			return false;
	}

	/*
	 * When either
	 *	1) the bus ID is equal to 0 (some ports might lose the bus
	 *	   ID of the error source),
	 *	2) the bus flag PCI_BUS_FLAGS_NO_AERSID is set, or
	 *	3) there are multiple errors and the prior ID comparison
	 *	   fails,
	 * we check the AER status registers to find a possible reporter.
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	if (!aer)
		return false;

	/* Check if error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
	}
	if (status & ~mask)
		return true;

	return false;
}

static int find_device_iter(struct pci_dev *dev, void *data)
{
	struct aer_err_info *e_info = (struct aer_err_info *)data;

	if (is_error_source(dev, e_info)) {
		/* List this device */
		if (add_error_device(e_info, dev)) {
			/* We cannot handle more... stop iteration */
			return 1;
		}

		/* If there is only a single error, stop iteration */
		if (!e_info->multi_error_valid)
			return 1;
	}
	return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: detailed error information, including the error source ID
 *
 * Return true if a source device was found.
 *
 * The caller must set the id, severity, and multi_error_valid fields of
 * the struct aer_err_info pointed to by @e_info; this function fills in
 * e_info->error_dev_num and e_info->dev[] based on that information.
 */
static bool find_source_device(struct pci_dev *parent,
			       struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is the Root Port itself the agent that sent the error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	if (pci_pcie_type(parent) == PCI_EXP_TYPE_RC_EC)
		pcie_walk_rcec(parent, find_device_iter, e_info);
	else
		pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		pci_info(parent, "can't find device of ID%04x\n", e_info->id);
		return false;
	}
	return true;
}

/**
 * handle_error_source - handle logging error into an event log
 * @dev: pointer to pci_dev data structure of the error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
	int aer = dev->aer_cap;

	if (info->severity == AER_CORRECTABLE) {
		/*
		 * Correctable error does not need software intervention.
		 * No need to go through the error recovery process.
		 */
		if (aer)
			pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS,
					       info->status);
		if (pcie_aer_is_native(dev))
			pcie_clear_device_status(dev);
	} else if (info->severity == AER_NONFATAL)
		pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
	else if (info->severity == AER_FATAL)
		pcie_do_recovery(dev, pci_channel_io_frozen, aer_root_reset);
	pci_dev_put(dev);
}

#ifdef CONFIG_ACPI_APEI_PCIEAER

#define AER_RECOVER_RING_ORDER		4
#define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)

struct aer_recover_entry {
	u8	bus;
	u8	devfn;
	u16	domain;
	int	severity;
	struct aer_capability_regs *regs;
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);
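/*
 * APEI/GHES hands us error records from firmware-notification context;
 * they are queued into aer_recover_ring and consumed by a workqueue so
 * that recovery runs in process context.
 */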

static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			pr_err("no pci_dev for %04x:%02x:%02x.%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		if (entry.severity == AER_NONFATAL)
			pcie_do_recovery(pdev, pci_channel_io_normal,
					 aer_root_reset);
		else if (entry.severity == AER_FATAL)
			pcie_do_recovery(pdev, pci_channel_io_frozen,
					 aer_root_reset);
		pci_dev_put(pdev);
	}
}

/*
 * Mutual exclusion for writers of aer_recover_ring; the reader side does
 * not need the lock because there is only one reader, and no lock is
 * needed between the reader and the writers.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);

void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs)
{
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
		.regs		= aer_regs,
	};

	if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
				&aer_recover_ring_lock))
		schedule_work(&aer_recover_work);
	else
		pr_err("buffer overflow in recovery for %04x:%02x:%02x.%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif

/**
 * aer_get_device_error_info - read error status from dev and store it to info
 * @dev: pointer to the device expected to have an error record
 * @info: pointer to structure to store the error record
 *
 * Return 1 on success, 0 on error.
 *
 * Note that @info is reused among all error devices. Clear fields properly.
 */
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int type = pci_pcie_type(dev);
	int aer = dev->aer_cap;
	u32 aercc;

	/* Must reset in this function */
	info->status = 0;
	info->tlp_header_valid = 0;

	/* The device might not support AER */
	if (!aer)
		return 0;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS,
				      &info->status);
		pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK,
				      &info->mask);
		if (!(info->status & ~info->mask))
			return 0;
	} else if (type == PCI_EXP_TYPE_ROOT_PORT ||
		   type == PCI_EXP_TYPE_RC_EC ||
		   type == PCI_EXP_TYPE_DOWNSTREAM ||
		   info->severity == AER_NONFATAL) {

		/* Link is still healthy for IO reads */
		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
				      &info->status);
		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
				      &info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get First Error Pointer */
		pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
		info->first_error = PCI_ERR_CAP_FEP(aercc);

		if (info->status & AER_LOG_TLP_MASKS) {
			info->tlp_header_valid = 1;
			pci_read_config_dword(dev,
				aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return 1;
}

static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
	int i;

	/* Report all errors before handling them, so records are not lost to resets */
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (aer_get_device_error_info(e_info->dev[i], e_info))
			aer_print_error(e_info->dev[i], e_info);
	}
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (aer_get_device_error_info(e_info->dev[i], e_info))
			handle_error_source(e_info->dev[i], e_info);
	}
}

/**
 * aer_isr_one_error - consume an error detected by Root Port
 * @rpc: pointer to the aer_rpc of the Root Port which holds an error
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct aer_rpc *rpc,
			      struct aer_err_source *e_src)
{
	struct pci_dev *pdev = rpc->rpd;
	struct aer_err_info e_info;

	pci_rootport_aer_stats_incr(pdev, e_src);

	/*
	 * There is a possibility that both a correctable error and an
	 * uncorrectable error are logged. Report the correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info.id = ERR_COR_ID(e_src->id);
		e_info.severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info.multi_error_valid = 1;
		else
			e_info.multi_error_valid = 0;
		aer_print_port_info(pdev, &e_info);

		if (find_source_device(pdev, &e_info))
			aer_process_err_devices(&e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info.id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info.severity = AER_FATAL;
		else
			e_info.severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info.multi_error_valid = 1;
		else
			e_info.multi_error_valid = 0;

		aer_print_port_info(pdev, &e_info);

		if (find_source_device(pdev, &e_info))
			aer_process_err_devices(&e_info);
	}
}

/**
 * aer_isr - consume errors detected by Root Port
 * @irq: IRQ assigned to Root Port
 * @context: pointer to Root Port data structure
 *
 * Invoked as the threaded half of the IRQ handler when the Root Port
 * records a newly detected error.
 */
static irqreturn_t aer_isr(int irq, void *context)
{
	struct pcie_device *dev = (struct pcie_device *)context;
	struct aer_rpc *rpc = get_service_data(dev);
	struct aer_err_source e_src;

	if (kfifo_is_empty(&rpc->aer_fifo))
		return IRQ_NONE;

	while (kfifo_get(&rpc->aer_fifo, &e_src))
		aer_isr_one_error(rpc, &e_src);
	return IRQ_HANDLED;
}

/**
 * aer_irq - Root Port's ISR
 * @irq: IRQ assigned to Root Port
 * @context: pointer to Root Port data structure
 *
 * Invoked when Root Port detects AER messages.
 */
static irqreturn_t aer_irq(int irq, void *context)
{
	struct pcie_device *pdev = (struct pcie_device *)context;
	struct aer_rpc *rpc = get_service_data(pdev);
	struct pci_dev *rp = rpc->rpd;
	int aer = rp->aer_cap;
	struct aer_err_source e_src = {};

	pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status);
	if (!(e_src.status & AER_ERR_STATUS_MASK))
		return IRQ_NONE;

	pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
	pci_write_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, e_src.status);

	if (!kfifo_put(&rpc->aer_fifo, e_src))
		return IRQ_HANDLED;

	return IRQ_WAKE_THREAD;
}

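/*
 * Callback for pci_walk_bus()/pcie_walk_rcec(): enable or disable error
 * reporting on a single device, for the port types that forward errors.
 */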
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
	bool enable = *((bool *)data);
	int type = pci_pcie_type(dev);

	if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
	    (type == PCI_EXP_TYPE_RC_EC) ||
	    (type == PCI_EXP_TYPE_UPSTREAM) ||
	    (type == PCI_EXP_TYPE_DOWNSTREAM)) {
		if (enable)
			pci_enable_pcie_error_reporting(dev);
		else
			pci_disable_pcie_error_reporting(dev);
	}

	return 0;
}

/**
 * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
 * @dev: pointer to root port's pci_dev data structure
 * @enable: true = enable error reporting, false = disable error reporting.
 */
static void set_downstream_devices_error_reporting(struct pci_dev *dev,
						   bool enable)
{
	set_device_error_reporting(dev, &enable);

	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC)
		pcie_walk_rcec(dev, set_device_error_reporting, &enable);
	else if (dev->subordinate)
		pci_walk_bus(dev->subordinate, set_device_error_reporting,
			     &enable);
}

/**
 * aer_enable_rootport - enable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIe bus loads AER service driver.
 */
static void aer_enable_rootport(struct aer_rpc *rpc)
{
	struct pci_dev *pdev = rpc->rpd;
	int aer = pdev->aer_cap;
	u16 reg16;
	u32 reg32;

	/* Clear PCIe Capability's Device Status */
	pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);

	/* Disable system error generation in response to error messages */
	pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
				   SYSTEM_ERROR_INTR_ON_MESG_MASK);

	/* Clear error status */
	pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, reg32);
	pci_read_config_dword(pdev, aer + PCI_ERR_COR_STATUS, &reg32);
	pci_write_config_dword(pdev, aer + PCI_ERR_COR_STATUS, reg32);
	pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &reg32);
	pci_write_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, reg32);

	/*
	 * Enable error reporting for the root port device and downstream
	 * port devices.
	 */
	set_downstream_devices_error_reporting(pdev, true);

	/* Enable Root Port's interrupt in response to error messages */
	pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
}

/**
 * aer_disable_rootport - disable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIe bus unloads AER service driver.
 */
static void aer_disable_rootport(struct aer_rpc *rpc)
{
	struct pci_dev *pdev = rpc->rpd;
	int aer = pdev->aer_cap;
	u32 reg32;

	/*
	 * Disable error reporting for the root port device and downstream
	 * port devices.
	 */
	set_downstream_devices_error_reporting(pdev, false);

	/* Disable Root's interrupt in response to error messages */
	pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);

	/* Clear Root's error status reg */
	pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, reg32);
}

/**
 * aer_remove - clean up resources
 * @dev: pointer to the pcie_dev data structure
 *
 * Invoked when PCI Express bus unloads or AER probe fails.
 */
static void aer_remove(struct pcie_device *dev)
{
	struct aer_rpc *rpc = get_service_data(dev);

	aer_disable_rootport(rpc);
}

/**
 * aer_probe - initialize resources
 * @dev: pointer to the pcie_dev data structure
 *
 * Invoked when PCI Express bus loads AER service driver.
 */
static int aer_probe(struct pcie_device *dev)
{
	int status;
	struct aer_rpc *rpc;
	struct device *device = &dev->device;
	struct pci_dev *port = dev->port;

	BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) <
		     AER_MAX_TYPEOF_COR_ERRS);
	BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) <
		     AER_MAX_TYPEOF_UNCOR_ERRS);

	/* Limit to Root Ports or Root Complex Event Collectors */
	if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&
	    (pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT))
		return -ENODEV;

	rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
	if (!rpc)
		return -ENOMEM;

	rpc->rpd = port;
	INIT_KFIFO(rpc->aer_fifo);
	set_service_data(dev, rpc);

	status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
					   IRQF_SHARED, "aerdrv", dev);
	if (status) {
		pci_err(port, "request AER IRQ %d failed\n", dev->irq);
		return status;
	}

	aer_enable_rootport(rpc);
	pci_info(port, "enabled with IRQ %d\n", dev->irq);
	return 0;
}

/**
 * aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP
 * @dev: pointer to Root Port, RCEC, or RCiEP
 *
 * Invoked by Port Bus driver when performing reset.
 */
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);
	struct pci_dev *root;
	int aer;
	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
	u32 reg32;
	int rc;

	/*
	 * Only Root Ports and RCECs have AER Root Command and Root Status
	 * registers.  If "dev" is an RCiEP, the relevant registers are in
	 * the RCEC.
	 */
	if (type == PCI_EXP_TYPE_RC_END)
		root = dev->rcec;
	else
		root = pcie_find_root_port(dev);

	/*
	 * If the platform retained control of AER, an RCiEP may not have
	 * an RCEC visible to us, so dev->rcec ("root") may be NULL.  In
	 * that case, firmware is responsible for these registers.
	 */
	aer = root ? root->aer_cap : 0;

	if ((host->native_aer || pcie_ports_native) && aer) {
		/* Disable Root's interrupt in response to error messages */
		pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
		reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
		pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
	}

	if (type == PCI_EXP_TYPE_RC_EC || type == PCI_EXP_TYPE_RC_END) {
		rc = pcie_reset_flr(dev, PCI_RESET_DO_RESET);
		if (!rc)
			pci_info(dev, "has been reset\n");
		else
			pci_info(dev, "not reset (no FLR support: %d)\n", rc);
	} else {
		rc = pci_bus_error_reset(dev);
		pci_info(dev, "%s Port link has been reset (%d)\n",
			 pci_is_root_bus(dev->bus) ? "Root" : "Downstream", rc);
	}

	if ((host->native_aer || pcie_ports_native) && aer) {
		/* Clear Root Error Status */
		pci_read_config_dword(root, aer + PCI_ERR_ROOT_STATUS, &reg32);
		pci_write_config_dword(root, aer + PCI_ERR_ROOT_STATUS, reg32);

		/* Enable Root Port's interrupt in response to error messages */
		pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
		reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
		pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
	}

	return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static struct pcie_port_service_driver aerdriver = {
	.name		= "aer",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_AER,

	.probe		= aer_probe,
	.remove		= aer_remove,
};

/**
 * pcie_aer_init - register AER root service driver
 *
 * Invoked when AER root service driver is loaded.
 */
int __init pcie_aer_init(void)
{
	if (!pci_aer_available())
		return -ENXIO;
	return pcie_port_service_register(&aerdriver);
}