// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * CXL core introduces sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
								     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		rc = emit_target_list(cxlsd, buf);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	switch (cxled->mode) {
	case CXL_DECODER_RAM:
		return sysfs_emit(buf, "ram\n");
	case CXL_DECODER_PMEM:
		return sysfs_emit(buf, "pmem\n");
	case CXL_DECODER_NONE:
		return sysfs_emit(buf, "none\n");
	case CXL_DECODER_MIXED:
	default:
		return sysfs_emit(buf, "mixed\n");
	}
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_decoder_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_DECODER_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_DECODER_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_mode(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	u64 base = cxl_dpa_resource_start(cxled);

	return sysfs_emit(buf, "%#llx\n", base);
}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);
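
/*
 * Usage sketch (illustrative, not driver code): the mode and dpa_size
 * attributes above are written from userspace to provision device-physical
 * address capacity ahead of region creation. Assuming a hypothetical
 * endpoint decoder "decoder3.0":
 *
 *   echo ram > /sys/bus/cxl/devices/decoder3.0/mode
 *   echo 0x10000000 > /sys/bus/cxl/devices/decoder3.0/dpa_size
 *
 * Writes to dpa_size must be SZ_256M aligned (see dpa_size_store() above),
 * and writing 0 releases the allocation via cxl_dpa_free().
 */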

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}

static DEVICE_ATTR_RO(interleave_granularity);

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}

static DEVICE_ATTR_RO(interleave_ways);

static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_interleave_ways.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(create_pmem_region)
	SET_CXL_REGION_ATTR(delete_region)
	NULL,
};

static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd))
		return 0;

	return a->mode;
}

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
	.is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);

static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (atomic_read(&cxlrd->region_id) >= 0)
		memregion_free(atomic_read(&cxlrd->region_id));
	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}

static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root ports and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port's lock.
	 */
	if (!parent)
		lock_dev = port->uport;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;
	port->uport = uport;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * walk to the host bridge, or the first ancestor that knows
		 * the host bridge
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else
			port->host_bridge = iter->uport;
		dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
	} else
		dev->parent = uport;

	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	port->hdm_end = -1;
	port->commit_end = -1;
	xa_init(&port->dports);
	xa_init(&port->endpoints);
	xa_init(&port->regions);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_dport)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
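
/*
 * Illustrative sketch (assumed caller, not part of this file): a platform
 * driver establishes the root of the decode hierarchy by passing a NULL
 * @parent_dport, which yields a "root%d" port with no component registers:
 *
 *	struct cxl_port *root_port;
 *
 *	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	if (IS_ERR(root_port))
 *		return PTR_ERR(root_port);
 *
 * Subsequent ports are added with a parent_dport so that cxl_port_alloc()
 * can establish ->depth and ->host_bridge.
 */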

struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport)) {
		struct pci_dev *pdev = to_pci_dev(port->uport);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);

static void unregister_pci_bus(void *uport)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);

static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

/* Find a root-child port with a dport that is an ancestor of @match */
static int match_root_child(struct device *dev, const void *match)
{
	const struct device *iter = NULL;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!dev_is_cxl_root_child(dev))
		return 0;

	port = to_cxl_port(dev);
	iter = match;
	while (iter) {
		dport = cxl_find_dport_by_dev(port, iter);
		if (dport)
			break;
		iter = iter->parent;
	}

	return !!iter;
}

struct cxl_port *find_cxl_root(struct device *dev)
{
	struct device *port_dev;
	struct cxl_port *root;

	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
	if (!port_dev)
		return NULL;

	root = to_cxl_port(port_dev->parent);
	get_device(&root->dev);
	put_device(port_dev);
	return root;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);
	xa_for_each(&port->dports, index, dport)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
		return -EBUSY;
	}
	return xa_insert(&port->dports, (unsigned long)new->dport, new,
			 GFP_KERNEL);
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver, and cond_cxl_root_lock() is used to take the
 * missing port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	xa_erase(&port->dports, (unsigned long) dport->dport);
	put_device(dport->dport);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}

/**
 * devm_cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports), or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
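
/*
 * Illustrative sketch (assumed caller, not part of this file): a port
 * driver registers each discovered downstream port; 'pdev' and 'port_id'
 * are assumptions standing in for the caller's enumeration results:
 *
 *	struct cxl_dport *dport;
 *
 *	dport = devm_cxl_add_dport(port, &pdev->dev, port_id,
 *				   component_reg_phys);
 *	if (IS_ERR(dport))
 *		return PTR_ERR(dport);
 *
 * The bound-driver check above ties the dport's devm lifetime to the
 * hosting port (or, for root ports, the platform host device).
 */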

static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	int rc;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
		       GFP_KERNEL);
	device_unlock(&port->dev);

	return rc;
}

/*
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch is comprised of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports.
 * When bridges stack, the grand-parent of a downstream switch port is
 * another downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd, NULL);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	device_lock(parent);
	if (parent->driver && !endpoint->dead) {
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	device_unlock(parent);
	put_device(parent);
out:
	put_device(&endpoint->dev);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	dev_set_drvdata(dev, endpoint);
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port
 * goes through a ->remove() event ("top-down" unregistration). The unnatural
 * end of life of a port happens when the last endpoint that depends on the
 * port departs ("bottom-up" unregistration, see cxl_detach_ep()). In that
 * case the port and its dports are reaped here, under the parent port's
 * device lock.
 */
static void delete_switch_port(struct cxl_port *port)
{
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}

static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}

int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
			  struct cxl_dport *parent_dport)
{
	struct cxl_port *parent_port = parent_dport->port;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_port *endpoint, *iter, *down;
	int rc;

	/*
	 * Now that the path to the root is established record all the
	 * intervening ports in the chain.
	 */
	for (iter = parent_port, down = NULL; !is_cxl_root(iter);
	     down = iter, iter = to_cxl_port(iter->dev.parent)) {
		struct cxl_ep *ep;

		ep = cxl_ep_load(iter, cxlmd);
		ep->next = down;
	}

	endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
				     cxlds->component_reg_phys, parent_dport);
	if (IS_ERR(endpoint))
		return PTR_ERR(endpoint);

	dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));

	rc = cxl_endpoint_autoremove(cxlmd, endpoint);
	if (rc)
		return rc;

	if (!endpoint->dev.driver) {
		dev_err(&cxlmd->dev, "%s failed probe\n",
			dev_name(&endpoint->dev));
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);

static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct device *iter;

	for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct cxl_port *port, *parent_port;
		struct cxl_ep *ep;
		bool died = false;

		if (!dport_dev)
			break;

		port = find_cxl_port(dport_dev, NULL);
		if (!port)
			continue;

		if (is_cxl_root(port)) {
			put_device(&port->dev);
			continue;
		}

		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		if (!parent_port->dev.driver) {
			/*
			 * The bottom-up race to delete the port lost to a
			 * top-down port disable, give up here, because the
			 * parent_port ->remove() will have cleaned up all
			 * descendants.
			 */
			device_unlock(&parent_port->dev);
			put_device(&port->dev);
			continue;
		}

		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port)) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}

static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device, in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return cxl_regmap_to_base(pdev, &map);
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EBUSY) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxlsd->target_lock);
	for (i = 0; i < cxlsd->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxlsd->target[i] = dport;
	}
	write_sequnlock(&cxlsd->target_lock);

	return rc;
}

static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
{
	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
	struct cxl_decoder *cxld = &cxlsd->cxld;
	int iw;

	iw = cxld->interleave_ways;
	if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
			  "misconfigured root decoder\n"))
		return NULL;

	return cxlrd->cxlsd.target[pos % iw];
}

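/*
 * Worked example for cxl_hb_modulo() above: with interleave_ways == 4 and
 * targets [hb0, hb1, hb2, hb3], a region position maps to a host bridge as
 * pos % 4, so positions 0,4,8,... decode through hb0, positions 1,5,9,...
 * through hb1, and so on. The dev_WARN_ONCE() enforces the invariant that a
 * root decoder has exactly one static target per interleave way.
 */
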
static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enables some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add().
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stay around for cxl_*_decoder_release() */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}

static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{
	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return -EINVAL;

	cxlsd->nr_targets = nr_targets;
	seqlock_init(&cxlsd->target_lock);
	return cxl_decoder_init(port, &cxlsd->cxld);
}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxlrd->calc_hb = cxl_hb_modulo;

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
	if (!cxlsd)
		return ERR_PTR(-ENOMEM);

	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlsd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_switch_type;
	return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
	if (!cxled)
		return ERR_PTR(-ENOMEM);

	cxled->pos = -1;
	cxld = &cxled->cxld;
	rc = cxl_decoder_init(port, cxld);
	if (rc) {
		kfree(cxled);
		return ERR_PTR(rc);
	}

	cxld->dev.type = &cxl_decoder_endpoint_type;
	return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);

/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port
 *	    that owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
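
/*
 * Illustrative sketch (assumed caller, not part of this file): the expected
 * allocate / configure / add flow for a switch decoder. The target_map
 * values are assumptions for the example; real ones come from hardware
 * decoder enumeration:
 *
 *	struct cxl_switch_decoder *cxlsd;
 *	int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0, 1 };
 *	int rc;
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, 2);
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 *	cxlsd->cxld.interleave_ways = 2;
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc)
 *		put_device(&cxlsd->cxld.dev);
 *	else
 *		rc = cxl_decoder_autoremove(host, &cxlsd->cxld);
 */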

static void cxld_unregister(void *dev)
{
	struct cxl_endpoint_decoder *cxled;

	if (is_endpoint_decoder(dev)) {
		cxled = to_cxl_endpoint_decoder(dev);
		cxl_decoder_kill_region(cxled);
	}

	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
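
/*
 * Illustrative sketch (assumed caller, not part of this file): a minimal
 * cxl_driver definition. The cxl_driver_register() / module_cxl_driver()
 * wrappers in cxl.h supply @owner and @modname; 'my_port_probe' is an
 * assumption for the example:
 *
 *	static struct cxl_driver my_port_driver = {
 *		.name = "my_port_driver",
 *		.probe = my_port_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(my_port_driver);
 */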

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);

static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

int cxl_bus_rescan(void)
{
	return bus_rescan_devices(&cxl_bus_type);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}

static BUS_ATTR_WO(flush);
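
/*
 * Usage note: userspace can synchronize with pending port unregistration
 * work queued on cxl_bus_wq by writing "1" to the bus-level attribute:
 *
 *   echo 1 > /sys/bus/cxl/flush
 *
 * Any other value is rejected with -EINVAL (see flush_store() above).
 */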

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");