// SPDX-License-Identifier: GPL-2.0-only
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"
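
/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */
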
DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}
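
/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport
 * sees are claimed and passed to the single dport.
 */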
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	/* grab the first (and only) dport as the passthrough target */
	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}

static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
					  void __iomem *crb)
{
	struct cxl_component_reg_map map;

	cxl_probe_component_regs(&port->dev, crb, &map);
	if (!map.hdm_decoder.valid) {
		dev_err(&port->dev, "HDM decoder registers invalid\n");
		return IOMEM_ERR_PTR(-ENXIO);
	}

	return crb + map.hdm_decoder.offset;
}
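
/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 */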
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	void __iomem *crb, *hdm;
	struct cxl_hdm *cxlhdm;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	crb = devm_cxl_iomap_block(dev, port->component_reg_phys,
				   CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb) {
		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	hdm = map_hdm_decoder_regs(port, crb);
	if (IS_ERR(hdm))
		return ERR_CAST(hdm);
	cxlhdm->regs.hdm_decoder = hdm;

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	dev_set_drvdata(dev, cxlhdm);

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
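
/*
 * Release a DPA reservation (and any skipped space reserved with it).
 * Requires cxl_dpa_rwsem held for write, and drops the decoder device
 * reference taken by __cxl_dpa_reserve().
 */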
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

/* devm action wrapper: take the DPA rwsem and release the reservation */
static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}
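
/*
 * Release the DPA reservation before the devm action fires: remove the
 * pending action and release directly. Requires cxl_dpa_rwsem held for
 * write.
 */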
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len)
		goto success;

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

success:
	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
				resource_size_t base, resource_size_t len,
				resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;
	up_read(&cxl_dpa_rwsem);

	return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * layout
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

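/*
 * Allocate DPA from the free space at the end of the requested partition.
 * RAM capacity precedes PMEM capacity in the device's DPA space, so a PMEM
 * allocation may need to "skip" over unallocated RAM capacity; that skip is
 * reserved alongside the allocation so decoder DPA ranges stay monotonic.
 */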
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	/* target_type == 3 is CXL_DECODER_EXPANDER, i.e. a Type-3 device */
	u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
			  CXL_HDM_DECODER0_CTRL_TYPE);
}

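/*
 * Pack the downstream port ids into the 8-entry target list register image,
 * one byte per interleave-way.
 */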
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
			  ways > 8 || ways > cxlsd->nr_targets,
			  "ways: %d overflows targets: %d\n", ways,
			  cxlsd->nr_targets))
		return -ENXIO;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);

	return 0;
}
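
/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account
 * for clock skew and other marginal behavior
 */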
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		rc = cxlsd_set_targets(cxlsd, &targets);
		if (rc) {
			dev_dbg(&port->dev, "%s: target configuration error\n",
				dev_name(&cxld->dev));
			goto err;
		}

		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
err:
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	return 0;
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (is_endpoint_decoder(&cxld->dev))
		cxled = to_cxl_endpoint_decoder(&cxld->dev);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
			cxld->target_type = CXL_DECODER_EXPANDER;
		else
			cxld->target_type = CXL_DECODER_ACCELERATOR;
		if (cxld->id != port->commit_end + 1) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		/* unless / until type-2 drivers arrive, assume type-3 */
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
		cxld->target_type = CXL_DECODER_EXPANDER;
	}
	rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	if (!cxled) {
		target_list.value =
			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;
	return 0;
}
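
/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 */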
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i, committed;
	u64 dpa_base = 0;
	u32 ctrl;

	/*
	 * Since the register resource was recently claimed via request_region()
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed.  The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * platform and endpoint visibility of the same clock.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
		if (rc) {
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder to port\n");
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);