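/*
 * PCI/PCIe support for PowerNV platforms: OPAL-backed config space access,
 * EEH handling, slot management helpers and PHB diagnostics dumping.
 */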
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

static DEFINE_MUTEX(tunnel_mutex);

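/**
 * pnv_pci_get_slot_id - Derive the OPAL slot id for a device node
 * @np: device node of the PCI device/slot
 * @id: returned OPAL slot id
 *
 * Walk up the device tree until the hosting PHB is found and combine its
 * "ibm,opal-phbid" with the device's bus/device/function number into the
 * slot id expected by the OPAL slot-management calls (used e.g. by the
 * PowerNV hotplug driver). Returns 0 on success, -ENXIO if a required
 * property is missing, or -ENODEV if no suitable PHB is found.
 */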
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *node = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	bdfn = ((bdfn & 0x00ffff00) >> 8);
	for (node = np; node; node = of_get_parent(node)) {
		if (!PCI_DN(node)) {
			of_node_put(node);
			break;
		}

		if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda3-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
			of_node_put(node);
			continue;
		}

		ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(node);
			return -ENXIO;
		}

		if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
			*id = PCI_PHB_SLOT_ID(phbid);
		else
			*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);

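/**
 * pnv_pci_get_device_tree - Fetch a device sub-tree from OPAL
 * @phandle: phandle of the node whose sub-tree is wanted
 * @buf: buffer receiving the flattened sub-tree
 * @len: size of @buf in bytes
 *
 * Thin wrapper around OPAL_GET_DEVICE_TREE. Returns OPAL's non-negative
 * result on success, -ENXIO if firmware does not implement the call, or
 * -EIO on failure.
 */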
int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

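/**
 * pnv_pci_get_presence_state - Query slot presence state from OPAL
 * @id: OPAL slot id (see pnv_pci_get_slot_id())
 * @state: returned presence state
 *
 * Returns 0 on success, -ENXIO if firmware does not implement the call,
 * or -EIO on failure.
 */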
int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

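/**
 * pnv_pci_get_power_state - Query slot power state from OPAL
 * @id: OPAL slot id
 * @state: returned power state
 *
 * Returns 0 on success, -ENXIO if firmware does not implement the call,
 * or -EIO on failure.
 */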
int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);

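/**
 * pnv_pci_set_power_state - Change a slot's power state via OPAL
 * @id: OPAL slot id
 * @state: requested power state
 * @msg: optional buffer for the OPAL completion message
 *
 * The OPAL call may complete synchronously or asynchronously. Returns 0
 * when the state change completed immediately, 1 when it completed
 * asynchronously and the completion message was copied into @msg, or a
 * negative errno on failure.
 *
 * Rough caller sketch (hypothetical; the real user is the PowerNV hotplug
 * driver, and the OPAL_PCI_SLOT_* constants come from the OPAL API headers):
 *
 *	uint64_t id;
 *	uint8_t presence;
 *
 *	if (!pnv_pci_get_slot_id(np, &id) &&
 *	    !pnv_pci_get_presence_state(id, &presence) &&
 *	    presence == OPAL_PCI_SLOT_PRESENT)
 *		pnv_pci_set_power_state(id, OPAL_PCI_SLOT_POWER_ON, NULL);
 */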
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);

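/*
 * Dump the PEST (PE State Table) A/B entries that are in a stopped (frozen)
 * state, collapsing runs of identical entries into a single "as above" line
 * to keep the log readable.
 */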
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	u64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		u64 peA = be64_to_cpu(pestA[i]);
		u64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i - 1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));

	if (data->phbPblErrorStatus)
		pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp: %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}

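/**
 * pnv_pci_dump_phb_diag_data - Pretty-print PHB diagnostics data
 * @hose: PCI controller the diag-data belongs to
 * @log_buff: raw diag-data buffer returned by OPAL
 *
 * Decodes the common header to find the PHB type and dispatches to the
 * matching dump routine; unknown types are reported with a warning.
 */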
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
		pnv_pci_dump_phb4_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}

static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Obtain the diag-data for the PHB */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* Clear the frozen state, via the compound-PE hook if the PHB has one */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb, pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number, pe_no);
			ret = -EIO;
		}
	}

	/*
	 * Only dump the diag-data when clearing the frozen state failed,
	 * so the log isn't flooded by the normal errors generated while
	 * probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}

static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE number. During the PCI probe stage it might not have
	 * been assigned yet, in which case the error is mapped to the
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE)
		pe_no = phb->ioda.reserved_pe_idx;

	/*
	 * Fetch the frozen state, using the compound-PE hook if the PHB
	 * provides one.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
						&fstate, &pcierr, NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze the PE for
		 * consistency before handling the event.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}

int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled on this PHB: nothing to check */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* Block config access while the PE is isolated or the device removed */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

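/* Allocate an iommu_table on the given NUMA node and initialise its refcount */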
struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}

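/* Return the device-tree node of the PHB hosting @dev, with a reference held */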
struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

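/**
 * pnv_pci_set_tunnel_bar - Set or clear the PBCQ tunnel BAR for a device
 * @dev: PCI device requesting the change
 * @addr: address to program into the tunnel BAR
 * @enable: non-zero to set the BAR, zero to clear it
 *
 * Only one tunnel BAR value can be in use per PHB: enabling fails with
 * -EBUSY if a different address is already programmed, and disabling fails
 * with -EPERM unless the caller passes the address it enabled earlier.
 */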
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	u64 tunnel_bar;
	__be64 val;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use the tunnel BAR at a time.
		 * If it is already set, only the current owner (same
		 * address) may claim it again.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* address already set, nothing to do */
			goto out;
		}
	} else {
		/*
		 * Only the owner may clear the tunnel BAR, so the caller
		 * must pass the address it enabled earlier.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/*
 * The IBM root complexes behind these PHBs report a wrong PCI class code;
 * force them to be treated as normal PCI-to-PCI bridges.
 */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, there is nothing to probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

#ifdef CONFIG_PCIEPORTBUS
	/*
	 * Disable the PCIe port services: on PowerNV the PCIe host bridges
	 * are managed in cooperation with firmware, so AER events are
	 * delivered through EEH and hotplug interrupts are owned by
	 * firmware rather than by the pciehp driver.
	 */
	pcie_ports_disabled = true;
#endif

	/* Look for IODA IO-Hubs */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub")
		pnv_pci_init_ioda_hub(np);

	/* Look for IODA2 PHBs (PHB3) */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for IODA3 PHBs (PHB4), which reuse the IODA2 setup code */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
				      unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_DEL_DEVICE:
		iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block pnv_tce_iommu_bus_nb = {
	.notifier_call = pnv_tce_iommu_bus_notifier,
};

static int __init pnv_tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);