// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Vendor Specific Extended Capabilities auxiliary bus driver
 *
 * Copyright (c) 2021, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: David E. Box <david.e.box@linux.intel.com>
 *
 * This driver discovers Intel defined VSEC/DVSEC capabilities in PCI config
 * space and creates an auxiliary device for each supported feature
 * (telemetry, watcher, crashlog, SDSi) so that feature-specific drivers can
 * bind to them.
 */

#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>

#include "vsec.h"

/* Intel DVSEC offsets */
#define INTEL_DVSEC_ENTRIES		0xA
#define INTEL_DVSEC_SIZE		0xB
#define INTEL_DVSEC_TABLE		0xC
#define INTEL_DVSEC_TABLE_BAR(x)	((x) & GENMASK(2, 0))
#define INTEL_DVSEC_TABLE_OFFSET(x)	((x) & GENMASK(31, 3))
#define TABLE_OFFSET_SHIFT		3
#define PMT_XA_START			0
#define PMT_XA_MAX			INT_MAX
#define PMT_XA_LIMIT			XA_LIMIT(PMT_XA_START, PMT_XA_MAX)

static DEFINE_IDA(intel_vsec_ida);
static DEFINE_IDA(intel_vsec_sdsi_ida);
static DEFINE_XARRAY_ALLOC(auxdev_array);

/**
 * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
 * @rev:         Revision ID of the VSEC/DVSEC register space
 * @length:      Length of the VSEC/DVSEC register space
 * @id:          ID of the feature
 * @num_entries: Number of instances of the feature
 * @entry_size:  Size of the discovery table for each feature, in dwords
 * @tbir:        BAR containing the discovery tables
 * @offset:      BAR offset of start of the first discovery table
 */
struct intel_vsec_header {
	u8	rev;
	u16	length;
	u16	id;
	u8	num_entries;
	u8	entry_size;
	u8	tbir;
	u32	offset;
};

enum intel_vsec_id {
	VSEC_ID_TELEMETRY	= 2,
	VSEC_ID_WATCHER		= 3,
	VSEC_ID_CRASHLOG	= 4,
	VSEC_ID_SDSI		= 65,
};

static enum intel_vsec_id intel_vsec_allow_list[] = {
	VSEC_ID_TELEMETRY,
	VSEC_ID_WATCHER,
	VSEC_ID_CRASHLOG,
	VSEC_ID_SDSI,
};

static const char *intel_vsec_name(enum intel_vsec_id id)
{
	switch (id) {
	case VSEC_ID_TELEMETRY:
		return "telemetry";

	case VSEC_ID_WATCHER:
		return "watcher";

	case VSEC_ID_CRASHLOG:
		return "crashlog";

	case VSEC_ID_SDSI:
		return "sdsi";

	default:
		return NULL;
	}
}

static bool intel_vsec_allowed(u16 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_vsec_allow_list); i++)
		if (intel_vsec_allow_list[i] == id)
			return true;

	return false;
}

static bool intel_vsec_disabled(u16 id, unsigned long quirks)
{
	switch (id) {
	case VSEC_ID_WATCHER:
		return !!(quirks & VSEC_QUIRK_NO_WATCHER);

	case VSEC_ID_CRASHLOG:
		return !!(quirks & VSEC_QUIRK_NO_CRASHLOG);

	default:
		return false;
	}
}

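/*
 * devm action registered by intel_vsec_add_aux(): removes the auxiliary
 * device from the bus and drops the initial reference.
 */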
static void intel_vsec_remove_aux(void *data)
{
	auxiliary_device_delete(data);
	auxiliary_device_uninit(data);
}

static void intel_vsec_dev_release(struct device *dev)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);

	ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
	kfree(intel_vsec_dev->resource);
	kfree(intel_vsec_dev);
}

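/*
 * Allocate an ID for the auxiliary device, register it on the auxiliary bus,
 * and tie its removal to the parent PCI device via devm. Once
 * auxiliary_device_init() has succeeded, unwinding happens through the
 * release callback. The device is also tracked in auxdev_array so it can be
 * released during slot reset.
 */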
static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev,
			      const char *name)
{
	struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
	int ret, id;

	ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return ret;
	}

	auxdev->id = ret;
	auxdev->name = name;
	auxdev->dev.parent = &pdev->dev;
	auxdev->dev.release = intel_vsec_dev_release;

	ret = auxiliary_device_init(auxdev);
	if (ret < 0) {
		ida_free(intel_vsec_dev->ida, auxdev->id);
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return ret;
	}

	ret = auxiliary_device_add(auxdev);
	if (ret < 0) {
		auxiliary_device_uninit(auxdev);
		return ret;
	}

	ret = devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux,
				       auxdev);
	if (ret < 0)
		return ret;

	/* Add auxdev to list */
	ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT,
		       GFP_KERNEL);
	if (ret)
		return ret;

	return 0;
}

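/*
 * Create an auxiliary device for one capability header. The header is
 * rejected if its ID is not in the allow list, is disabled by quirk, or
 * describes zero entries or a zero entry size. One MMIO resource is built
 * per discovery table entry.
 */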
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
			      struct intel_vsec_platform_info *info)
{
	struct intel_vsec_device *intel_vsec_dev;
	struct resource *res, *tmp;
	unsigned long quirks = info->quirks;
	int i;

	if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
		return -EINVAL;

	if (!header->num_entries) {
		dev_dbg(&pdev->dev, "Invalid 0 entry count for header id %d\n", header->id);
		return -EINVAL;
	}

	if (!header->entry_size) {
		dev_dbg(&pdev->dev, "Invalid 0 entry size for header id %d\n", header->id);
		return -EINVAL;
	}

	intel_vsec_dev = kzalloc(sizeof(*intel_vsec_dev), GFP_KERNEL);
	if (!intel_vsec_dev)
		return -ENOMEM;

	res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
	if (!res) {
		kfree(intel_vsec_dev);
		return -ENOMEM;
	}

	if (quirks & VSEC_QUIRK_TABLE_SHIFT)
		header->offset >>= TABLE_OFFSET_SHIFT;

	/*
	 * The DVSEC/VSEC contains the starting offset and count for a block of
	 * discovery tables. Create a resource array of these tables to the
	 * auxiliary device driver.
	 */
	for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
		tmp->start = pdev->resource[header->tbir].start +
			     header->offset + i * (header->entry_size * sizeof(u32));
		tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
		tmp->flags = IORESOURCE_MEM;
	}

	intel_vsec_dev->pcidev = pdev;
	intel_vsec_dev->resource = res;
	intel_vsec_dev->num_resources = header->num_entries;
	intel_vsec_dev->info = info;

	if (header->id == VSEC_ID_SDSI)
		intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
	else
		intel_vsec_dev->ida = &intel_vsec_ida;

	return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
}

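/*
 * Register devices from a board-provided list of capability headers. Used
 * when the VSEC_QUIRK_NO_DVSEC quirk indicates the hardware does not expose
 * the capabilities in config space.
 */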
static bool intel_vsec_walk_header(struct pci_dev *pdev,
				   struct intel_vsec_platform_info *info)
{
	struct intel_vsec_header **header = info->capabilities;
	bool have_devices = false;
	int ret;

	for ( ; *header; header++) {
		ret = intel_vsec_add_dev(pdev, *header, info);
		if (ret)
			dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
				 (*header)->id);
		else
			have_devices = true;
	}

	return have_devices;
}

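/*
 * Walk all Designated Vendor Specific Extended Capabilities (DVSEC) in
 * config space and create an auxiliary device for each supported Intel
 * capability. Returns true if at least one device was created.
 */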
static bool intel_vsec_walk_dvsec(struct pci_dev *pdev,
				  struct intel_vsec_platform_info *info)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		u16 vid;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
		vid = PCI_DVSEC_HEADER1_VID(hdr);
		if (vid != PCI_VENDOR_ID_INTEL)
			continue;

		/* Support only revision 1 */
		header.rev = PCI_DVSEC_HEADER1_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
			continue;
		}

		header.length = PCI_DVSEC_HEADER1_LEN(hdr);

		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
		header.id = PCI_DVSEC_HEADER2_ID(hdr);

		ret = intel_vsec_add_dev(pdev, &header, info);
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}

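/*
 * Walk all Vendor Specific Extended Capabilities (VSEC) in config space and
 * create an auxiliary device for each supported capability. The entry count,
 * entry size, and table offset registers sit at the same offsets as in the
 * DVSEC layout, so the INTEL_DVSEC_* offsets are reused here.
 */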
static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
				 struct intel_vsec_platform_info *info)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);

		/* Support only revision 1 */
		header.rev = PCI_VNDR_HEADER_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
			continue;
		}

		header.id = PCI_VNDR_HEADER_ID(hdr);
		header.length = PCI_VNDR_HEADER_LEN(hdr);

		/* entry, size and table offset are the same as DVSEC */
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		ret = intel_vsec_add_dev(pdev, &header, info);
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}

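/*
 * Probe: enable the device, then discover capabilities from DVSEC, VSEC, and
 * (for quirky hardware) the statically provided header list. Fails with
 * -ENODEV if no supported capability produced an auxiliary device.
 */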
static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_vsec_platform_info *info;
	bool have_devices = false;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_save_state(pdev);
	info = (struct intel_vsec_platform_info *)id->driver_data;
	if (!info)
		return -EINVAL;

	if (intel_vsec_walk_dvsec(pdev, info))
		have_devices = true;

	if (intel_vsec_walk_vsec(pdev, info))
		have_devices = true;

	if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
	    intel_vsec_walk_header(pdev, info))
		have_devices = true;

	if (!have_devices)
		return -ENODEV;

	return 0;
}

/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
	.quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG |
		  VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
};

/* DG1 info */
static struct intel_vsec_header dg1_telemetry = {
	.length = 0x10,
	.id = 2,
	.num_entries = 1,
	.entry_size = 3,
	.tbir = 0,
	.offset = 0x466000,
};

static struct intel_vsec_header *dg1_capabilities[] = {
	&dg1_telemetry,
	NULL
};

static const struct intel_vsec_platform_info dg1_info = {
	.capabilities = dg1_capabilities,
	.quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};

#define PCI_DEVICE_ID_INTEL_VSEC_ADL		0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1		0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM		0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_RPL		0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL		0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
	{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);

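/*
 * PCI AER handlers: error_detected disables the device (or requests a
 * disconnect on permanent failure); slot_reset re-enables it, releases the
 * auxiliary devices that were created for it, and re-runs probe; resume
 * simply logs completion.
 */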
static pci_ers_result_t intel_vsec_pci_error_detected(struct pci_dev *pdev,
						      pci_channel_state_t state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "PCI error detected, state %d\n", state);

	if (state == pci_channel_io_perm_failure)
		status = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	return status;
}

static pci_ers_result_t intel_vsec_pci_slot_reset(struct pci_dev *pdev)
{
	struct intel_vsec_device *intel_vsec_dev;
	pci_ers_result_t status = PCI_ERS_RESULT_DISCONNECT;
	const struct pci_device_id *pci_dev_id;
	unsigned long index;

	dev_info(&pdev->dev, "Resetting PCI slot\n");

	msleep(2000);
	if (pci_enable_device(pdev)) {
		dev_info(&pdev->dev,
			 "Failed to re-enable PCI device after reset.\n");
		goto out;
	}

	status = PCI_ERS_RESULT_RECOVERED;

	xa_for_each(&auxdev_array, index, intel_vsec_dev) {
		/* check if pdev doesn't match */
		if (pdev != intel_vsec_dev->pcidev)
			continue;
		devm_release_action(&pdev->dev, intel_vsec_remove_aux,
				    &intel_vsec_dev->auxdev);
	}
	pci_disable_device(pdev);
	pci_restore_state(pdev);
	pci_dev_id = pci_match_id(intel_vsec_pci_ids, pdev);
	intel_vsec_pci_probe(pdev, pci_dev_id);

out:
	return status;
}

static void intel_vsec_pci_resume(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "Done resuming PCI device\n");
}

static const struct pci_error_handlers intel_vsec_pci_err_handlers = {
	.error_detected = intel_vsec_pci_error_detected,
	.slot_reset = intel_vsec_pci_slot_reset,
	.resume = intel_vsec_pci_resume,
};

static struct pci_driver intel_vsec_pci_driver = {
	.name = "intel_vsec",
	.id_table = intel_vsec_pci_ids,
	.probe = intel_vsec_pci_probe,
	.err_handler = &intel_vsec_pci_err_handlers,
};
module_pci_driver(intel_vsec_pci_driver);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
MODULE_LICENSE("GPL v2");