// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
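/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. CXL core
 * provides a set of helpers for CXL interactions which occur via PCIe.
 */
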
static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");

struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};

static int match_add_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct cxl_register_map map;
	struct cxl_dport *dport;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != ctx->bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != ctx->type)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return 0;

	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_dbg(&port->dev, "failed to find component registers\n");

	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	dport = devm_cxl_add_dport(port, &pdev->dev, port_num,
				   cxl_regmap_to_base(pdev, &map));
	if (IS_ERR(dport)) {
		ctx->error = PTR_ERR(dport);
		return PTR_ERR(dport);
	}
	ctx->count++;

	dev_dbg(&port->dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));

	return 0;
}

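/**
 * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
 * @port: cxl_port whose ->uport is the upstream of dports to be enumerated
 *
 * Returns a positive number of dports enumerated or a negative error
 * code.
 */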
int devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;
	int type;

	if (!bus)
		return -ENXIO;

	if (pci_is_root_bus(bus))
		type = PCI_EXP_TYPE_ROOT_PORT;
	else
		type = PCI_EXP_TYPE_DOWNSTREAM;

	ctx = (struct cxl_walk_context) {
		.port = port,
		.bus = bus,
		.type = type,
	};
	pci_walk_bus(bus, match_add_dports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);

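/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */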
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	u64 md_status;
	int rc, i;

	for (i = media_ready_timeout; i; i--) {
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			media_ready_timeout);
		return -ETIMEDOUT;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);

static int wait_for_valid(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec, rc;
	u32 val;

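	/*
	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
	 * and Size Low registers are valid. Must be set within 1 second of
	 * deassertion of reset to CXL device. Likely it is already set by the
	 * time this runs, but otherwise give a 1.5 second timeout in case of
	 * clock skew.
	 */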
	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	msleep(1500);

	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	return -ETIMEDOUT;
}

static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	u16 ctrl;
	int rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc < 0)
		return rc;

	if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val)
		return 1;
	ctrl &= ~CXL_DVSEC_MEM_ENABLE;
	ctrl |= val;

	rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl);
	if (rc < 0)
		return rc;

	return 0;
}

static void clear_mem_enable(void *cxlds)
{
	cxl_set_mem_enable(cxlds, 0);
}

static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
	int rc;

	rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE);
	if (rc < 0)
		return rc;
	if (rc > 0)
		return 0;
	return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}

static bool range_contains(struct range *r1, struct range *r2)
{
	return r1->start <= r2->start && r1->end >= r2->end;
}

static int dvsec_range_allowed(struct device *dev, void *arg)
{
	struct range *dev_range = arg;
	struct cxl_decoder *cxld;

	if (!is_root_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	if (!(cxld->flags & CXL_DECODER_F_LOCK))
		return 0;
	if (!(cxld->flags & CXL_DECODER_F_RAM))
		return 0;

	return range_contains(&cxld->hpa_range, dev_range);
}

static void disable_hdm(void *_cxlhdm)
{
	u32 global_ctrl;
	struct cxl_hdm *cxlhdm = _cxlhdm;
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}

static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	u32 global_ctrl;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}

static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
				  struct cxl_hdm *cxlhdm,
				  struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	struct device *dev = cxlds->dev;
	struct cxl_port *root;
	int i, rc, allowed;
	u32 global_ctrl;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

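	/*
	 * If the HDM Decoder Capability is already enabled then assume
	 * that some other agent like platform firmware set it up.
	 */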
	if (global_ctrl & CXL_HDM_DECODER_ENABLE) {
		rc = devm_cxl_enable_mem(&port->dev, cxlds);
		if (rc)
			return false;
		return true;
	}

	root = to_cxl_port(port->dev.parent);
	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
		root = to_cxl_port(root->dev.parent);
	if (!is_cxl_root(root)) {
		dev_err(dev, "Failed to acquire root port for HDM enable\n");
		return false;
	}

	for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
		struct device *cxld_dev;

		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
					     dvsec_range_allowed);
		if (!cxld_dev) {
			dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
			continue;
		}
		dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
		put_device(cxld_dev);
		allowed++;
	}

	if (!allowed) {
		cxl_set_mem_enable(cxlds, 0);
		info->mem_enabled = 0;
	}

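	/*
	 * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
	 * [High,Low] when HDM operation is enabled the range register values
	 * are ignored by the device, but the spec also recommends matching the
	 * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
	 * are expected even though Linux does not require or maintain that
	 * match. If at least one DVSEC range is enabled and allowed, skip HDM
	 * Decoder Capability Enable.
	 */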
	if (info->mem_enabled)
		return false;

	rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
	if (rc)
		return false;

	rc = devm_cxl_enable_mem(&port->dev, cxlds);
	if (rc)
		return false;

	return true;
}

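/**
 * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
 * @cxlds: Device state
 * @cxlhdm: Mapped HDM decoder Capability
 *
 * Try to enable the endpoint's HDM Decoder Capability
 */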
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	struct cxl_endpoint_dvsec_info info = { 0 };
	int hdm_count, rc, i, ranges = 0;
	struct device *dev = &pdev->dev;
	int d = cxlds->cxl_dvsec;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc)
		return rc;

	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

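	/*
	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
	 * driver is for a spec defined class code which must be CXL.mem
	 * capable, there is no point in continuing to enable CXL.mem.
	 */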
	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = wait_for_valid(cxlds);
	if (rc) {
		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
		return rc;
	}

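	/*
	 * The current DVSEC values are moot if the memory capability is
	 * disabled, and they will remain moot after the HDM Decoder
	 * capability is enabled.
	 */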
	info.mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
	if (!info.mem_enabled)
		goto hdm_init;

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

		info.dvsec_range[i] = (struct range) {
			.start = base,
			.end = base + size - 1
		};

		if (size)
			ranges++;
	}

	info.ranges = ranges;

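	/*
	 * If DVSEC ranges are being used instead of HDM decoder registers there
	 * is no use in trying to manage those.
	 */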
hdm_init:
	if (!__cxl_hdm_decode_init(cxlds, cxlhdm, &info)) {
		dev_err(dev,
			"Legacy range registers configuration prevents HDM operation.\n");
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);

#define CXL_DOE_TABLE_ACCESS_REQ_CODE		0x000000ff
#define   CXL_DOE_TABLE_ACCESS_REQ_CODE_READ	0
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE		0x0000ff00
#define   CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA	0
#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE	0xffff0000
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY		0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2

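/* Find the DOE mailbox on the memdev that supports the CDAT table access protocol */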
static struct pci_doe_mb *find_cdat_doe(struct device *uport)
{
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	unsigned long index;
	void *entry;

	cxlmd = to_cxl_memdev(uport);
	cxlds = cxlmd->cxlds;

	xa_for_each(&cxlds->doe_mbs, index, entry) {
		struct pci_doe_mb *cur = entry;

		if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL,
					  CXL_DOE_PROTOCOL_TABLE_ACCESS))
			return cur;
	}

	return NULL;
}

#define CDAT_DOE_REQ(entry_handle)					\
	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
		    CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))

static void cxl_doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}

struct cdat_doe_task {
	u32 request_pl;
	u32 response_pl[32];
	struct completion c;
	struct pci_doe_task task;
};

#define DECLARE_CDAT_DOE_TASK(req, cdt)					\
struct cdat_doe_task cdt = {						\
	.c = COMPLETION_INITIALIZER_ONSTACK(cdt.c),			\
	.request_pl = req,						\
	.task = {							\
		.prot.vid = PCI_DVSEC_VENDOR_ID_CXL,			\
		.prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS,		\
		.request_pl = &cdt.request_pl,				\
		.request_pl_sz = sizeof(cdt.request_pl),		\
		.response_pl = cdt.response_pl,				\
		.response_pl_sz = sizeof(cdt.response_pl),		\
		.complete = cxl_doe_task_complete,			\
		.private = &cdt.c,					\
	}								\
}

static int cxl_cdat_get_length(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       size_t *length)
{
	DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t);
	int rc;

	rc = pci_doe_submit_task(cdat_doe, &t.task);
	if (rc < 0) {
		dev_err(dev, "DOE submit failed: %d", rc);
		return rc;
	}
	wait_for_completion(&t.c);
	if (t.task.rv < sizeof(u32))
		return -EIO;

	*length = t.response_pl[1];
	dev_dbg(dev, "CDAT length %zu\n", *length);

	return 0;
}

static int cxl_cdat_read_table(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       struct cxl_cdat *cdat)
{
	size_t length = cdat->length;
	u32 *data = cdat->table;
	int entry_handle = 0;

	do {
		DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
		size_t entry_dw;
		u32 *entry;
		int rc;

		rc = pci_doe_submit_task(cdat_doe, &t.task);
		if (rc < 0) {
			dev_err(dev, "DOE submit failed: %d", rc);
			return rc;
		}
		wait_for_completion(&t.c);

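		/* 1 DW header + 1 DW data min */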
		if (t.task.rv < (2 * sizeof(u32)))
			return -EIO;

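		/* Get the CXL table access header entry handle */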
		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
					 t.response_pl[0]);
		entry = t.response_pl + 1;
		entry_dw = t.task.rv / sizeof(u32);
		/* Skip the DOE header dword */
		entry_dw -= 1;
		/* Clamp to the space remaining in the output buffer */
		entry_dw = min(length / sizeof(u32), entry_dw);

		if (entry_dw) {
			memcpy(data, entry, entry_dw * sizeof(u32));
			length -= entry_dw * sizeof(u32);
			data += entry_dw;
		}
	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);

	return 0;
}

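/**
 * read_cdat_data - Read the CDAT data on this port
 * @port: Port to read data from
 *
 * This call will sleep waiting for responses from the DOE mailbox.
 */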
void read_cdat_data(struct cxl_port *port)
{
	struct pci_doe_mb *cdat_doe;
	struct device *dev = &port->dev;
	struct device *uport = port->uport;
	size_t cdat_length;
	int rc;

	cdat_doe = find_cdat_doe(uport);
	if (!cdat_doe) {
		dev_dbg(dev, "No CDAT mailbox\n");
		return;
	}

	port->cdat_available = true;

	if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
		dev_dbg(dev, "No CDAT length\n");
		return;
	}

	port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL);
	if (!port->cdat.table)
		return;

	port->cdat.length = cdat_length;
	rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat);
	if (rc) {
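		/* Don't leave table data allocated on error */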
		devm_kfree(dev, port->cdat.table);
		port->cdat.table = NULL;
		port->cdat.length = 0;
		dev_err(dev, "CDAT data read error\n");
	}
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);