// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pnv-pci.h>
#include <asm/io.h>
#include <asm/reg.h>

#include "cxl.h"
#include <misc/cxl.h>

#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80

#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
	{							\
		pci_read_config_word(dev, vsec + 0x6, dest);	\
		*dest >>= 4;					\
	}
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
#define   CXL_STATUS_SECOND_PORT  0x80
#define   CXL_STATUS_MSI_X_FULL   0x40
#define   CXL_STATUS_MSI_X_SINGLE 0x20
#define   CXL_STATUS_FLASH_RW     0x08
#define   CXL_STATUS_FLASH_RO     0x04
#define   CXL_STATUS_LOADABLE_AFU 0x02
#define   CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)

#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
#define   CXL_VSEC_PROTOCOL_MASK   0xe0
#define   CXL_VSEC_PROTOCOL_1024TB 0x80
#define   CXL_VSEC_PROTOCOL_512TB  0x40
#define   CXL_VSEC_PROTOCOL_256TB  0x20
#define   CXL_VSEC_PROTOCOL_ENABLE 0x01

#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)
#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
#define   CXL_VSEC_USER_IMAGE_LOADED 0x80
#define   CXL_VSEC_PERST_LOADS_IMAGE 0x20
#define   CXL_VSEC_PERST_SELECT_USER 0x10

#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)

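/* This works a little different than the p1/p2 register accesses to make it
 * easier to pull out individual fields */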
#define AFUD_READ(afu, off)		in_be64(afu->native->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off)		in_le64(afu->native->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit)	(!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))

#define AFUD_READ_INFO(afu)		AFUD_READ(afu, 0x0)
#define   AFUD_NUM_INTS_PER_PROC(val)	EXTRACT_PPC_BITS(val, 0, 15)
#define   AFUD_NUM_PROCS(val)		EXTRACT_PPC_BITS(val, 16, 31)
#define   AFUD_NUM_CRS(val)		EXTRACT_PPC_BITS(val, 32, 47)
#define   AFUD_MULTIMODE(val)		EXTRACT_PPC_BIT(val, 48)
#define   AFUD_PUSH_BLOCK_TRANSFER(val)	EXTRACT_PPC_BIT(val, 55)
#define   AFUD_DEDICATED_PROCESS(val)	EXTRACT_PPC_BIT(val, 59)
#define   AFUD_AFU_DIRECTED(val)	EXTRACT_PPC_BIT(val, 61)
#define   AFUD_TIME_SLICED(val)		EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)		AFUD_READ(afu, 0x20)
#define   AFUD_CR_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)		AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)		AFUD_READ(afu, 0x30)
#define   AFUD_PPPSA_PP(val)		EXTRACT_PPC_BIT(val, 6)
#define   AFUD_PPPSA_PSA(val)		EXTRACT_PPC_BIT(val, 7)
#define   AFUD_PPPSA_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu)	AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)		AFUD_READ(afu, 0x40)
#define   AFUD_EB_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)		AFUD_READ(afu, 0x48)

static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), },
	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);

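/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */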
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}

static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}

static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	/* The data port is the dword following status/control, at 0x5c */
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);

#undef show_reg
}

static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}

#define P8_CAPP_UNIT0_ID 0xBA
#define P8_CAPP_UNIT1_ID 0xBE
#define P9_CAPP_UNIT0_ID 0xC0
#define P9_CAPP_UNIT1_ID 0xE0

static int get_phb_index(struct device_node *np, u32 *phb_index)
{
	if (of_property_read_u32(np, "ibm,phb-index", phb_index))
		return -ENODEV;
	return 0;
}

static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
{
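	/*
	 * POWER 8:
	 *  - For chips other than POWER8NVL, we only have CAPP 0,
	 *    irrespective of which PHB is used.
	 *  - For POWER8NVL, assume CAPP 0 is attached to PHB0 and
	 *    CAPP 1 is attached to PHB1.
	 */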
	if (cxl_is_power8()) {
		if (!pvr_version_is(PVR_POWER8NVL))
			return P8_CAPP_UNIT0_ID;

		if (phb_index == 0)
			return P8_CAPP_UNIT0_ID;

		if (phb_index == 1)
			return P8_CAPP_UNIT1_ID;
	}

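	/*
	 * POWER 9:
	 *   PEC0 (PHB0): CAPP 0 is attached
	 *   PEC1 (PHB1 - PHB2): no CAPI mode
	 *   PEC2 (PHB3 - PHB4 - PHB5): CAPI mode on CAPP 1 (PHB3 only)
	 */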
	if (cxl_is_power9()) {
		if (phb_index == 0)
			return P9_CAPP_UNIT0_ID;

		if (phb_index == 3)
			return P9_CAPP_UNIT1_ID;
	}

	return 0;
}

int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
			  u32 *phb_index, u64 *capp_unit_id)
{
	int rc;
	struct device_node *np;
	const __be32 *prop;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;

	*chipid = be32_to_cpup(prop);

	rc = get_phb_index(np, phb_index);
	if (rc) {
		pr_err("cxl: invalid phb index\n");
		/* don't leak the node reference on the error path */
		of_node_put(np);
		return rc;
	}

	*capp_unit_id = get_capp_unit_id(np, *phb_index);
	of_node_put(np);
	if (!*capp_unit_id) {
		pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible slot\n",
		       *chipid, *phb_index);
		return -ENODEV;
	}

	return 0;
}

static DEFINE_MUTEX(indications_mutex);

static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind,
			       u64 *nbwind)
{
	static u64 nbw, asn, capi = 0;
	struct device_node *np;
	const __be32 *prop;

	mutex_lock(&indications_mutex);
	if (!capi) {
		if (!(np = pnv_pci_get_phb_node(dev))) {
			mutex_unlock(&indications_mutex);
			return -ENODEV;
		}

		prop = of_get_property(np, "ibm,phb-indications", NULL);
		if (!prop) {
			/* Fall back to legacy values when firmware does not
			 * publish the "ibm,phb-indications" property */
			nbw = 0x0300UL;
			asn = 0x0400UL;
			capi = 0x0200UL;
		} else {
			nbw = (u64)be32_to_cpu(prop[2]);
			asn = (u64)be32_to_cpu(prop[1]);
			capi = (u64)be32_to_cpu(prop[0]);
		}
		of_node_put(np);
	}
	*capiind = capi;
	*asnind = asn;
	*nbwind = nbw;
	mutex_unlock(&indications_mutex);
	return 0;
}

int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
{
	u64 xsl_dsnctl;
	u64 capiind, asnind, nbwind;

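	/*
	 * CAPI Identifier bits [0:7]
	 * bit 61:60 MSI bits --> 0
	 * bit 59 TVT selector --> 0
	 */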
	if (get_phb_indications(dev, &capiind, &asnind, &nbwind))
		return -ENODEV;

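	/*
	 * Tell XSL where to route data to.
	 * The field chipid should match the PHB CAPI_CMPM register
	 */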
	xsl_dsnctl = (capiind << (63-15));
	xsl_dsnctl |= (capp_unit_id << (63-15));

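	/* nMMU_ID defaults to b'000001001' (0x09) */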
	xsl_dsnctl |= ((u64)0x09 << (63-28));

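	/*
	 * Used to identify CAPI compliant, non-MSI ops.
	 * Firmware informs XSL if it supports nbwind.
	 */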
	xsl_dsnctl |= (nbwind << (63-55));

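	/*
	 * Used to identify CAPI compliant non-MSI ops
	 * ASN
	 */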
	xsl_dsnctl |= asnind;

	*reg = xsl_dsnctl;
	return 0;
}

static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
						 struct pci_dev *dev)
{
	u64 xsl_dsnctl, psl_fircntl;
	u64 chipid;
	u32 phb_index;
	u64 capp_unit_id;
	u64 psl_debug;
	int rc;

	rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
	if (rc)
		return rc;

	rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl);
	if (rc)
		return rc;

	cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);

	/* Set fir_cntl to recommended value for production env */
	psl_fircntl = (0x2ULL << (63-3));	/* ce_report */
	psl_fircntl |= (0x1ULL << (63-6));	/* FIR_report */
	psl_fircntl |= 0x1ULL;			/* ce_thresh */
	cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);

	/* Setup the PSL to transmit packets on the PCIe before the
	 * CAPP is enabled
	 */
	cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL);

	/*
	 * A response to an ASB_Notify request is returned by the
	 * system as an MMIO write to the address defined in
	 * the PSL_TNR_ADDR register; keep its reset value.
	 */

	/* Enable XSL rty limit */
	cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);

	/* Change XSL_INV dummy read threshold */
	cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);

	if (phb_index == 3) {
		/* disable machines 31-47 and 20-27 for DMA */
		cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
	}

	/* Snoop CAPI machines */
	cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);

	/* Enable NORST */
	cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);

	/*
	 * Check if the PSL has a data-cache. We need to flush the adapter
	 * datacache when it is about to be removed.
	 */
	psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
	if (psl_debug & CXL_PSL_DEBUG_CDC) {
		dev_dbg(&dev->dev, "No data-cache present\n");
		adapter->native->no_data_cache = true;
	}

	return 0;
}

static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_dsnctl, psl_fircntl;
	u64 chipid;
	u32 phb_index;
	u64 capp_unit_id;
	int rc;

	rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
	if (rc)
		return rc;

	psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
	psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
	/* Tell PSL where to route data to */
	psl_dsnctl |= (chipid << (63-5));
	psl_dsnctl |= (capp_unit_id << (63-13));

	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_cntl to recommended value for production env */
	psl_fircntl = (0x2ULL << (63-3));	/* ce_report */
	psl_fircntl |= (0x1ULL << (63-6));	/* FIR_report */
	psl_fircntl |= 0x1ULL;			/* ce_thresh */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}

/* Timebase sync: calibration and count fields of CXL_PSL_TB_CTLSTAT */
#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))

#define PSL_2048_250MHZ_CYCLES 1

static void write_timebase_ctrl_psl8(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
		     TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
}

static u64 timebase_read_psl9(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_PSL9_Timebase);
}

static u64 timebase_read_psl8(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_PSL_Timebase);
}

static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
	struct device_node *np;

	adapter->psl_timebase_synced = false;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return;

	/* Do not fail when CAPP timebase sync is not supported by OPAL */
	of_node_get(np);
	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
		of_node_put(np);
		dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
		return;
	}
	of_node_put(np);

	/*
	 * Setup PSL Timebase Control and Status register
	 * with the recommended Timebase Sync Count value
	 */
	if (adapter->native->sl_ops->write_timebase_ctrl)
		adapter->native->sl_ops->write_timebase_ctrl(adapter);

	/* Enable PSL Timebase */
	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
}

static int init_implementation_afu_regs_psl9(struct cxl_afu *afu)
{
	return 0;
}

static int init_implementation_afu_regs_psl8(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}

int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
		      unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}

int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}

int cxl_pci_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}

void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}

int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
			     struct cxl *adapter, unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}

void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}

static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}

static int switch_card_to_cxl(struct pci_dev *dev)
{
	int vsec;
	u8 val;
	int rc;

	dev_info(&dev->dev, "switch card to CXL\n");

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
		dev_err(&dev->dev, "failed to read current mode control: %i\n", rc);
		return rc;
	}
	val &= ~CXL_VSEC_PROTOCOL_MASK;
	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
	if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
		dev_err(&dev->dev, "failed to enable CXL protocol: %i\n", rc);
		return rc;
	}

	/*
	 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
	 * we must wait 100ms after this mode switch before touching
	 * PCIe config space.
	 */
	msleep(100);

	return 0;
}

static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->native->ps_off +
					(afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->native->afu_desc_off +
				  (afu->slice * adapter->native->afu_desc_size);

	if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (afu_desc) {
		if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->native->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}

static void pci_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio) {
		iounmap(afu->p2n_mmio);
		afu->p2n_mmio = NULL;
	}
	if (afu->native->p1n_mmio) {
		iounmap(afu->native->p1n_mmio);
		afu->native->p1n_mmio = NULL;
	}
	if (afu->native->afu_desc_mmio) {
		iounmap(afu->native->afu_desc_mmio);
		afu->native->afu_desc_mmio = NULL;
	}
}

void cxl_pci_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);
	cxl_release_spa(afu);

	kfree(afu->native);
	kfree(afu);
}

/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiples of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* sanity check: reject an implausible error buffer offset */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no afu buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}

static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i, rc;
	u32 val;

	if (afu->psa && afu->adapter->ps_size <
	    (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size);

	for (i = 0; i < afu->crs_num; i++) {
		rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
		if (rc || val == 0) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
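		/*
		 * We could also check this for the dedicated process model
		 * since the architecture indicates it should be set to 1, but
		 * in that case we ignore the value and I'd rather not risk
		 * breaking any existing dedicated process AFUs that left it as
		 * 0 (not that I'm aware of any). It is clearly an error for an
		 * AFU directed AFU to set this to 0, and would have previously
		 * triggered a bug resulting in the maximum not being enforced
		 * at all since MODE_UNKNOWN and MODE_DIRECTED were distinct
		 * error cases.
		 */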
		dev_err(&afu->dev, "AFU does not support any processes\n");
		return -EINVAL;
	}

	return 0;
}

static int sanitise_afu_regs_psl9(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgment to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (cxl_ops->afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}

	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL9_DSISR_An_TF)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	if (afu->adapter->native->sl_ops->register_serr_irq) {
		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		if (reg) {
			if (reg & ~0x000000007fffffff)
				dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
		}
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}

static int sanitise_afu_regs_psl8(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgment to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (cxl_ops->afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}

	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	if (afu->adapter->native->sl_ops->register_serr_irq) {
		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		if (reg) {
			if (reg & ~0xffff)
				dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
		}
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}

#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE

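/*
 * afu_eb_read:
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8 bytes aligned access. So in case the requested offset/count arent 8 byte
 * aligned the function uses a bounce buffer which can be max PAGE_SIZE.
 */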
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}

static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	if ((rc = pci_map_slice_regs(afu, adapter, dev)))
		return rc;

	if (adapter->native->sl_ops->sanitise_afu_regs) {
		rc = adapter->native->sl_ops->sanitise_afu_regs(afu);
		if (rc)
			goto err1;
	}

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err1;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err1;

	if (adapter->native->sl_ops->afu_regs_init)
		if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
			goto err1;

	if (adapter->native->sl_ops->register_serr_irq)
		if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
			goto err1;

	if ((rc = cxl_native_register_psl_irq(afu)))
		goto err2;

	atomic_set(&afu->configured_state, 0);
	return 0;

err2:
	if (adapter->native->sl_ops->release_serr_irq)
		adapter->native->sl_ops->release_serr_irq(afu);
err1:
	pci_unmap_slice_regs(afu);
	return rc;
}

static void pci_deconfigure_afu(struct cxl_afu *afu)
{
	/*
	 * It's okay to deconfigure when AFU is already locked, otherwise wait
	 * until there are no readers
	 */
	if (atomic_read(&afu->configured_state) != -1) {
		while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
			schedule();
	}
	cxl_native_release_psl_irq(afu);
	if (afu->adapter->native->sl_ops->release_serr_irq)
		afu->adapter->native->sl_ops->release_serr_irq(afu);
	pci_unmap_slice_regs(afu);
}

static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	int rc = -ENOMEM;

	afu = cxl_alloc_afu(adapter, slice);
	if (!afu)
		return -ENOMEM;

	afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
	if (!afu->native)
		goto err_free_afu;

	mutex_init(&afu->native->spa_mutex);

	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
	if (rc)
		goto err_free_native;

	rc = pci_configure_afu(afu, adapter, dev);
	if (rc)
		goto err_free_native;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put1:
	pci_deconfigure_afu(afu);
	cxl_debugfs_afu_remove(afu);
	device_unregister(&afu->dev);
	return rc;

err_free_native:
	kfree(afu->native);
err_free_afu:
	kfree(afu);
	return rc;
}

static void cxl_pci_remove_afu(struct cxl_afu *afu)
{
	pr_devel("%s\n", __func__);

	if (!afu)
		return;

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);

	pci_deconfigure_afu(afu);
	device_unregister(&afu->dev);
}

int cxl_pci_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;

	if (adapter->perst_same_image) {
		dev_warn(&dev->dev,
			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
		return -EINVAL;
	}

	dev_info(&dev->dev, "CXL reset\n");

	/*
	 * The adapter is about to be reset, so ignore errors.
	 */
	cxl_data_cache_flush(adapter);

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert.  PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	return rc;
}

static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 2 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 1 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
		 p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->native->p1_mmio);
	adapter->native->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}

static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->native->p1_mmio) {
		iounmap(adapter->native->p1_mmio);
		adapter->native->p1_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
	}
	if (adapter->native->p2_mmio) {
		iounmap(adapter->native->p2_mmio);
		adapter->native->p2_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
	}
}

static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->native->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
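/*
 * Workaround a PCIe Host Bridge defect on some cards, that can cause
 * malformed Transaction Layer Packet (TLP) errors to be erroneously
 * reported. Mask this error in the Uncorrectable Error Mask Register.
 *
 * The upper nibble of the PSL revision is used to distinguish between
 * different cards. The affected ones have it set to 0.
 */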
static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
{
	int aer;
	u32 data;

	if (adapter->psl_rev & 0xf000)
		return;
	if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
		return;
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
	if (data & PCI_ERR_UNC_MALF_TLP)
		if (data & PCI_ERR_UNC_INTN)
			return;
	data |= PCI_ERR_UNC_MALF_TLP;
	data |= PCI_ERR_UNC_INTN;
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}

static bool cxl_compatible_caia_version(struct cxl *adapter)
{
	if (cxl_is_power8() && (adapter->caia_major == 1))
		return true;

	if (cxl_is_power9() && (adapter->caia_major == 2))
		return true;

	return false;
}

static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!cxl_compatible_caia_version(adapter)) {
		dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n",
			 adapter->caia_major);
		return -ENODEV;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
			"available in BAR2: 0x%llx > 0x%llx\n",
			adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
		return -EINVAL;
	}

	return 0;
}

ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
}

static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	cxl_remove_adapter_nr(adapter);

	kfree(adapter->native);
	kfree(adapter);
}

#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))

static int sanitise_adapter_regs(struct cxl *adapter)
{
	int rc = 0;

	/* Clear PSL tberror bit by writing 1 to it */
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);

	if (adapter->native->sl_ops->invalidate_all) {
		/* do not invalidate ERAT entries when not reloading on PERST */
		if (cxl_is_power9() && (adapter->perst_loads_image))
			return 0;
		rc = adapter->native->sl_ops->invalidate_all(adapter);
	}

	return rc;
}
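/* This should contain *only* operations that can safely be done in
 * both creation and recovery.
 */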
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	if ((rc = cxl_read_vsec(adapter, dev)))
		return rc;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		return rc;

	cxl_fixup_malformed_tlp(adapter, dev);

	if ((rc = setup_cxl_bars(dev)))
		return rc;

	if ((rc = switch_card_to_cxl(dev)))
		return rc;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		return rc;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err;

	if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
		goto err;

	/* Required for devices using CAPP DMA mode, harmless for others */
	pci_set_master(dev);

	adapter->tunneled_ops_supported = false;

	if (cxl_is_power9()) {
		if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1))
			dev_info(&dev->dev, "Tunneled operations unsupported\n");
		else
			adapter->tunneled_ops_supported = true;
	}

	if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
		goto err;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err;

	/* Ignore error, adapter init is not dependent on timebase sync */
	cxl_setup_psl_timebase(adapter, dev);

	if ((rc = cxl_native_register_psl_err_irq(adapter)))
		goto err;

	return 0;

err:
	cxl_unmap_adapter_regs(adapter);
	return rc;
}

static void cxl_deconfigure_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	if (cxl_is_power9())
		pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0);

	cxl_native_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);

	pci_disable_device(pdev);
}

static void cxl_stop_trace_psl9(struct cxl *adapter)
{
	int traceid;
	u64 trace_state, trace_mask;
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	/* read each tracearray state and issue mmio to stop them if needed */
	for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
		trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
		trace_mask = (0x3ULL << (62 - traceid * 2));
		trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
		dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
			traceid, trace_state);

		/* issue mmio if the trace array isn't in FIN state */
		if (trace_state != CXL_PSL9_TRACESTATE_FIN)
			cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
				     0x8400000000000000ULL | traceid);
	}
}

static void cxl_stop_trace_psl8(struct cxl *adapter)
{
	int slice;

	/* Stop the trace */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);

	/* Stop the slice traces */
	spin_lock(&adapter->afu_list_lock);
	for (slice = 0; slice < adapter->slices; slice++) {
		if (adapter->afu[slice])
			cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
				      0x8000000000000000LL);
	}
	spin_unlock(&adapter->afu_list_lock);
}

static const struct cxl_service_layer_ops psl9_ops = {
	.adapter_regs_init = init_implementation_adapter_regs_psl9,
	.invalidate_all = cxl_invalidate_all_psl9,
	.afu_regs_init = init_implementation_afu_regs_psl9,
	.sanitise_afu_regs = sanitise_afu_regs_psl9,
	.register_serr_irq = cxl_native_register_serr_irq,
	.release_serr_irq = cxl_native_release_serr_irq,
	.handle_interrupt = cxl_irq_psl9,
	.fail_irq = cxl_fail_irq_psl,
	.activate_dedicated_process = cxl_activate_dedicated_process_psl9,
	.attach_afu_directed = cxl_attach_afu_directed_psl9,
	.attach_dedicated_process = cxl_attach_dedicated_process_psl9,
	.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9,
	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
	.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
	.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
	.debugfs_stop_trace = cxl_stop_trace_psl9,
	.timebase_read = timebase_read_psl9,
	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable = true,
};

static const struct cxl_service_layer_ops psl8_ops = {
	.adapter_regs_init = init_implementation_adapter_regs_psl8,
	.invalidate_all = cxl_invalidate_all_psl8,
	.afu_regs_init = init_implementation_afu_regs_psl8,
	.sanitise_afu_regs = sanitise_afu_regs_psl8,
	.register_serr_irq = cxl_native_register_serr_irq,
	.release_serr_irq = cxl_native_release_serr_irq,
	.handle_interrupt = cxl_irq_psl8,
	.fail_irq = cxl_fail_irq_psl,
	.activate_dedicated_process = cxl_activate_dedicated_process_psl8,
	.attach_afu_directed = cxl_attach_afu_directed_psl8,
	.attach_dedicated_process = cxl_attach_dedicated_process_psl8,
	.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
	.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
	.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
	.debugfs_stop_trace = cxl_stop_trace_psl8,
	.write_timebase_ctrl = write_timebase_ctrl_psl8,
	.timebase_read = timebase_read_psl8,
	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable = true,
};

static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
	if (cxl_is_power8()) {
		dev_info(&dev->dev, "Device uses a PSL8\n");
		adapter->native->sl_ops = &psl8_ops;
	} else {
		dev_info(&dev->dev, "Device uses a PSL9\n");
		adapter->native->sl_ops = &psl9_ops;
	}
}

static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	int rc;

	adapter = cxl_alloc_adapter();
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
	if (!adapter->native) {
		rc = -ENOMEM;
		goto err_release;
	}

	set_sl_ops(adapter, dev);

	/* Set defaults for parameters which need to persist over
	 * configure/reconfigure
	 */
	adapter->perst_loads_image = true;
	adapter->perst_same_image = false;

	rc = cxl_configure_adapter(adapter, dev);
	if (rc) {
		pci_disable_device(dev);
		goto err_release;
	}

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	/* Release the context lock as adapter is configured */
	cxl_adapter_context_unlock(adapter);

	return adapter;

err_put1:
	/* This should mirror cxl_remove_adapter, except without the
	 * sysfs parts
	 */
	cxl_debugfs_adapter_remove(adapter);
	cxl_deconfigure_adapter(adapter);
	device_unregister(&adapter->dev);
	return ERR_PTR(rc);

err_release:
	cxl_release_adapter(&adapter->dev);
	return ERR_PTR(rc);
}

static void cxl_pci_remove_adapter(struct cxl *adapter)
{
	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);

	/*
	 * Flush adapter datacache as it is about to be removed.
	 */
	cxl_data_cache_flush(adapter);

	cxl_deconfigure_adapter(adapter);

	device_unregister(&adapter->dev);
}

#define CXL_MAX_PCIEX_PARENT 2

int cxl_slot_is_switched(struct pci_dev *dev)
{
	struct device_node *np;
	int depth = 0;

	if (!(np = pci_device_to_OF_node(dev))) {
		pr_err("cxl: np = NULL\n");
		return -ENODEV;
	}
	of_node_get(np);
	while (np) {
		np = of_get_next_parent(np);
		if (!of_node_is_type(np, "pciex"))
			break;
		depth++;
	}
	of_node_put(np);
	return (depth > CXL_MAX_PCIEX_PARENT);
}

static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	if (cxl_pci_is_vphb_device(dev)) {
		dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
		return -ENODEV;
	}

	if (cxl_slot_is_switched(dev)) {
		dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
		return -ENODEV;
	}

	if (cxl_is_power9() && !radix_enabled()) {
		dev_info(&dev->dev, "Only Radix mode supported\n");
		return -ENODEV;
	}

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	adapter = cxl_pci_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = pci_init_afu(adapter, slice, dev))) {
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
			continue;
		}

		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
		if (rc)
			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
	}

	return 0;
}

static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_remove_afu(afu);
	}
	cxl_pci_remove_adapter(adapter);
}

static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
						pci_channel_state_t state)
{
	struct pci_dev *afu_dev;
	struct pci_driver *afu_drv;
	const struct pci_error_handlers *err_handler;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;

	/* There should only be one entry, but go through the list
	 * anyway
	 */
	if (afu == NULL || afu->phb == NULL)
		return result;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		afu_drv = to_pci_driver(afu_dev->dev.driver);
		if (!afu_drv)
			continue;

		afu_dev->error_state = state;

		err_handler = afu_drv->err_handler;
		if (err_handler)
			afu_result = err_handler->error_detected(afu_dev,
								 state);

		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	return result;
}

static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
	int i;

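	/* At this point, we could still have an interrupt pending.
	 * Let's try to get them out of the way before they do
	 * anything we don't like.
	 */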
	schedule();

	/* If we're permanently dead, give up. */
	if (state == pci_channel_io_perm_failure) {
		spin_lock(&adapter->afu_list_lock);
		for (i = 0; i < adapter->slices; i++) {
			afu = adapter->afu[i];
			/*
			 * Tell the AFU drivers; but we don't care what they
			 * say, we're going away.
			 */
			cxl_vphb_error_detected(afu, state);
		}
		spin_unlock(&adapter->afu_list_lock);
		return PCI_ERS_RESULT_DISCONNECT;
	}

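	/* Are we reflashing?
	 *
	 * If we reflash, we could come back as something entirely
	 * different, including a non-CAPI card. As such, by default
	 * we don't participate in the process. We'll be unbound and
	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
	 * us!)
	 *
	 * However, this isn't the entire story: for reliability
	 * reasons, we usually want to reflash the FPGA on PERST in
	 * order to get back to a more reliable known-good state.
	 *
	 * This causes us a bit of a problem: if we reflash we can't
	 * trust that we'll come back the same - we could have a new
	 * image and been PERSTed in order to load that
	 * image. However, most of the time we actually *will* come
	 * back the same - for example a regular EEH event.
	 *
	 * Therefore, we allow the user to assert that the image is
	 * indeed the same and that we should continue on into EEH
	 * anyway.
	 */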
	if (adapter->perst_loads_image && !adapter->perst_same_image) {
		/* TODO take the PHB out of CXL mode */
		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
		return PCI_ERS_RESULT_NONE;
	}

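	/*
	 * At this point, we want to try to recover.  We'll always
	 * need a complete slot reset: we don't trust any other reset.
	 *
	 * Now, we go through each AFU:
	 *  - We send the driver, if bound, an error_detected callback.
	 *    We expect it to clean up, but it can also tell us to give
	 *    up and permanently detach the card. To simplify things, if
	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
	 *
	 *  - We detach all contexts associated with the AFU. This
	 *    does not free them, but puts them into a CLOSED state
	 *    which causes any of the associated files to return useful
	 *    errors to userland. It also unmaps, but does not free,
	 *    any IRQs.
	 *
	 *  - We clean up our side: releasing and unmapping resources we hold
	 *    so we can wire them up again when the hardware comes back up.
	 *
	 * Driver authors should note:
	 *
	 *  - Any contexts you create in your kernel driver (except
	 *    those associated with anonymous file descriptors) are
	 *    your responsibility to free and recreate. Likewise with
	 *    any attached resources.
	 *
	 *  - We will take responsibility for re-initialising the
	 *    device context (the one set up for you in
	 *    cxl_pci_enable_device_hook and accessed through
	 *    cxl_get_context). If you've attached IRQs or other
	 *    resources to it, they remain yours to free.
	 *
	 * You can call the same functions to release resources as you
	 * normally would: we make sure that these functions continue
	 * to work when the hardware is down.
	 */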
	spin_lock(&adapter->afu_list_lock);
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (afu == NULL)
			continue;

		afu_result = cxl_vphb_error_detected(afu, state);
		cxl_context_detach_all(afu);
		cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
		pci_deconfigure_afu(afu);

		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	spin_unlock(&adapter->afu_list_lock);

	/* should take the context lock here */
	if (cxl_adapter_context_lock(adapter) != 0)
		dev_warn(&adapter->dev,
			 "Couldn't take context lock with %d active-contexts\n",
			 atomic_read(&adapter->contexts_num));

	cxl_deconfigure_adapter(adapter);

	return result;
}

static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	struct pci_dev *afu_dev;
	struct pci_driver *afu_drv;
	const struct pci_error_handlers *err_handler;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int i;

	if (cxl_configure_adapter(adapter, pdev))
		goto err;

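	/*
	 * Unlock context activation for the adapter. Ideally this should be
	 * done in cxl_pci_resume but cxlflash module tries to activate the
	 * master context as part of slot_reset callback.
	 */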
	cxl_adapter_context_unlock(adapter);

	spin_lock(&adapter->afu_list_lock);
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (afu == NULL)
			continue;

		if (pci_configure_afu(afu, adapter, pdev))
			goto err_unlock;

		if (cxl_afu_select_best_mode(afu))
			goto err_unlock;

		if (afu->phb == NULL)
			continue;

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			/* Reset the device context.
			 * TODO: make this less disruptive
			 */
			ctx = cxl_get_context(afu_dev);

			if (ctx && cxl_release_context(ctx))
				goto err_unlock;

			ctx = cxl_dev_context_init(afu_dev);
			if (IS_ERR(ctx))
				goto err_unlock;

			afu_dev->dev.archdata.cxl_ctx = ctx;

			if (cxl_ops->afu_check_and_enable(afu))
				goto err_unlock;

			afu_dev->error_state = pci_channel_io_normal;

			/* If there's a driver attached, allow it to
			 * chime in on recovery. Drivers should check
			 * if everything has come back OK, but
			 * shouldn't start new work until we call
			 * their resume function.
			 */
			afu_drv = to_pci_driver(afu_dev->dev.driver);
			if (!afu_drv)
				continue;

			err_handler = afu_drv->err_handler;
			if (err_handler && err_handler->slot_reset)
				afu_result = err_handler->slot_reset(afu_dev);

			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
				result = PCI_ERS_RESULT_DISCONNECT;
		}
	}

	spin_unlock(&adapter->afu_list_lock);
	return result;

err_unlock:
	spin_unlock(&adapter->afu_list_lock);

err:
	/* All the bits that happen in both error_detected and cxl_remove
	 * should be idempotent, so we don't need to worry about leaving a mix
	 * of unconfigured and reconfigured resources.
	 */
	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
	return PCI_ERS_RESULT_DISCONNECT;
}

static void cxl_pci_resume(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct pci_dev *afu_dev;
	struct pci_driver *afu_drv;
	const struct pci_error_handlers *err_handler;
	int i;

	/* Everything is back now. Drivers should restart work now.
	 * This is not the place to be checking if everything came back up
	 * properly, because there's no return value: do that in slot_reset.
	 */
	spin_lock(&adapter->afu_list_lock);
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (afu == NULL || afu->phb == NULL)
			continue;

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			afu_drv = to_pci_driver(afu_dev->dev.driver);
			if (!afu_drv)
				continue;

			err_handler = afu_drv->err_handler;
			if (err_handler && err_handler->resume)
				err_handler->resume(afu_dev);
		}
	}
	spin_unlock(&adapter->afu_list_lock);
}

static const struct pci_error_handlers cxl_err_handler = {
	.error_detected = cxl_pci_error_detected,
	.slot_reset = cxl_pci_slot_reset,
	.resume = cxl_pci_resume,
};

struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
	.err_handler = &cxl_err_handler,
};