// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

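/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the active mapped capacity by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 * 3. Decoder targets
 */
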
static DECLARE_RWSEM(cxl_region_rwsem);

static struct cxl_region *to_cxl_region(struct device *dev);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}

static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}

static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (uuid_equal(&p->uuid, &temp))
		goto out;

	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);

static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
					  struct cxl_region *cxlr)
{
	return xa_load(&port->regions, (unsigned long)cxlr);
}

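/*
 * Reset the HDM decoders for each endpoint in @cxlr: walk from the
 * root-most port towards the endpoint, resetting switch decoders along
 * the way, and reset the endpoint decoder itself last.
 */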
static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
	struct cxl_region_params *p = &cxlr->params;
	int i;

	for (i = count - 1; i >= 0; i--) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_port *iter = cxled_to_port(cxled);
		struct cxl_ep *ep;
		int rc;

		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			struct cxl_region_ref *cxl_rr;
			struct cxl_decoder *cxld;

			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = cxld->reset(cxld);
			if (rc)
				return rc;
		}

		rc = cxled->cxld.reset(&cxled->cxld);
		if (rc)
			return rc;
	}

	return 0;
}

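/*
 * Commit decoders from the endpoint up towards the root; if any level
 * fails, walk back down resetting what was just committed before
 * unwinding the previously committed targets.
 */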
static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_region_ref *cxl_rr;
		struct cxl_decoder *cxld;
		struct cxl_port *iter;
		struct cxl_ep *ep;

		/* commit bottom up */
		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
		     iter = to_cxl_port(iter->dev.parent)) {
			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = cxld->commit(cxld);
			if (rc)
				break;
		}

		if (rc) {
			/* programming @iter failed, teardown */
			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
				cxl_rr = cxl_rr_load(iter, cxlr);
				cxld = cxl_rr->decoder;
				cxld->reset(cxld);
			}

			cxled->cxld.reset(&cxled->cxld);
			goto err;
		}
	}

	return 0;

err:
	/* undo the targets that were successfully committed */
	cxl_region_decode_reset(cxlr, i);
	return rc;
}

static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	bool commit;
	ssize_t rc;

	rc = kstrtobool(buf, &commit);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* Already in the requested state? */
	if (commit && p->state >= CXL_CONFIG_COMMIT)
		goto out;
	if (!commit && p->state < CXL_CONFIG_COMMIT)
		goto out;

	/* Not ready to commit? */
	if (commit && p->state < CXL_CONFIG_ACTIVE) {
		rc = -ENXIO;
		goto out;
	}

	if (commit)
		rc = cxl_region_decode_commit(cxlr);
	else {
		p->state = CXL_CONFIG_RESET_PENDING;
		up_write(&cxl_region_rwsem);
		device_release_driver(&cxlr->dev);
		down_write(&cxl_region_rwsem);

		/*
		 * The lock was dropped, so need to revalidate that the reset
		 * is still pending.
		 */
		if (p->state == CXL_CONFIG_RESET_PENDING)
			rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
	}

	if (rc)
		goto out;

	if (commit)
		p->state = CXL_CONFIG_COMMIT;
	else if (p->state == CXL_CONFIG_RESET_PENDING)
		p->state = CXL_CONFIG_ACTIVE;

out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}

static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(commit);

static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0;
	return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}

static const struct attribute_group *get_cxl_region_target_group(void);

static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	unsigned int val, save;
	int rc;
	u8 iw;

	rc = kstrtouint(buf, 0, &val);
	if (rc)
		return rc;

	rc = ways_to_cxl(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x6, x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	save = p->interleave_ways;
	p->interleave_ways = val;
	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		p->interleave_ways = save;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}

static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	rc = granularity_to_cxl(val, &ig);
	if (rc)
		return rc;

	/*
	 * When the host-bridge is interleaved, disallow region granularity !=
	 * root granularity. Regions with a granularity less than the root
	 * interleave result in needing multiple endpoints to support a single
	 * slot in the interleave (possible to support in the future). Regions
	 * with a granularity greater than the root interleave result in
	 * invalid DPA translations (invalid to support).
	 */
	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);

static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 resource = -1ULL;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		resource = p->res->start;
	rc = sysfs_emit(buf, "%#llx\n", resource);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(resource);

static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_region_params *p = &cxlr->params;
	struct resource *res;
	u32 remainder = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	/* Nothing to do... */
	if (p->res && resource_size(p->res) == size)
		return 0;

	/* To change size the old size must be freed first */
	if (p->res)
		return -EBUSY;

	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
		return -EBUSY;

	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
	if (!p->interleave_ways || !p->interleave_granularity ||
	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
		return -ENXIO;

	div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
	if (remainder)
		return -EINVAL;

	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
				    dev_name(&cxlr->dev));
	if (IS_ERR(res)) {
		dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
			PTR_ERR(res));
		return PTR_ERR(res);
	}

	p->res = res;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	return 0;
}

static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	if (device_is_registered(&cxlr->dev))
		lockdep_assert_held_write(&cxl_region_rwsem);
	if (p->res) {
		remove_resource(p->res);
		kfree(p->res);
		p->res = NULL;
	}
}

static int free_hpa(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!p->res)
		return 0;

	if (p->state >= CXL_CONFIG_ACTIVE)
		return -EBUSY;

	cxl_region_iomem_release(cxlr);
	p->state = CXL_CONFIG_IDLE;
	return 0;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	u64 val;
	int rc;

	rc = kstrtou64(buf, 0, &val);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (val)
		rc = alloc_hpa(cxlr, val);
	else
		rc = free_hpa(cxlr);
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;

	return len;
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 size = 0;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		size = resource_size(p->res);
	rc = sysfs_emit(buf, "%#llx\n", size);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(size);

static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_commit.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};

static ssize_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	cxled = p->targets[pos];
	if (!cxled)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
	up_read(&cxl_region_rwsem);

	return rc;
}

static int match_free_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;
	int *id = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	/* enforce ordered allocation */
	if (cxld->id != *id)
		return 0;

	if (!cxld->region)
		return 1;

	(*id)++;

	return 0;
}

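/* Find the next free (in decoder-id order) switch decoder in @port */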
static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
						   struct cxl_region *cxlr)
{
	struct device *dev;
	int id = 0;

	dev = device_find_child(&port->dev, &id, match_free_decoder);
	if (!dev)
		return NULL;
	/*
	 * This decoder is pinned registered as long as the endpoint decoder is
	 * registered, and endpoint decoder unregistration holds the
	 * cxl_region_rwsem over unregister events, so no need to hold on to
	 * this extra reference.
	 */
	put_device(dev);
	return to_cxl_decoder(dev);
}

static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
					       struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_region_ref *cxl_rr, *iter;
	unsigned long index;
	int rc;

	xa_for_each(&port->regions, index, iter) {
		struct cxl_region_params *ip = &iter->region->params;

		if (ip->res->start > p->res->start) {
			dev_dbg(&cxlr->dev,
				"%s: HPA order violation %s:%pr vs %pr\n",
				dev_name(&port->dev),
				dev_name(&iter->region->dev), ip->res, p->res);
			return ERR_PTR(-EBUSY);
		}
	}

	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
	if (!cxl_rr)
		return ERR_PTR(-ENOMEM);
	cxl_rr->port = port;
	cxl_rr->region = cxlr;
	cxl_rr->nr_targets = 1;
	xa_init(&cxl_rr->endpoints);

	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track region reference: %d\n",
			dev_name(&port->dev), rc);
		kfree(cxl_rr);
		return ERR_PTR(rc);
	}

	return cxl_rr;
}

static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;

	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
	if (cxld->region == cxlr) {
		cxld->region = NULL;
		put_device(&cxlr->dev);
	}

	xa_erase(&port->regions, (unsigned long)cxlr);
	xa_destroy(&cxl_rr->endpoints);
	kfree(cxl_rr);
}

static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
			 struct cxl_endpoint_decoder *cxled)
{
	int rc;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

	if (ep) {
		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
			       GFP_KERNEL);
		if (rc)
			return rc;
	}
	cxl_rr->nr_eps++;

	if (!cxld->region) {
		cxld->region = cxlr;
		get_device(&cxlr->dev);
	}

	return 0;
}
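
/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 *
 *   - additionally allocate a decoder instance that will host @cxlr on
 *     @port
 *
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */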
static int cxl_port_attach_region(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_ref *cxl_rr;
	bool nr_targets_inc = false;
	struct cxl_decoder *cxld;
	unsigned long index;
	int rc = -EBUSY;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (cxl_rr) {
		struct cxl_ep *ep_iter;
		int found = 0;

		/*
		 * Walk the existing endpoints that have been attached to
		 * @cxlr at @port and see if they share the same 'next' port
		 * in the downstream direction, i.e. endpoints that share a
		 * common upstream switch.
		 */
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter == ep)
				continue;
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}

		/*
		 * New target port, or @port is an endpoint port that always
		 * accounts its own local decode as a target.
		 */
		if (!found || !ep->next) {
			cxl_rr->nr_targets++;
			nr_targets_inc = true;
		}

		/*
		 * The decoder for @port was picked the first time @cxlr was
		 * attached to this port, reuse it for this endpoint.
		 */
		cxld = cxl_rr->decoder;
	} else {
		cxl_rr = alloc_region_ref(port, cxlr);
		if (IS_ERR(cxl_rr)) {
			dev_dbg(&cxlr->dev,
				"%s: failed to allocate region reference\n",
				dev_name(&port->dev));
			return PTR_ERR(cxl_rr);
		}
		nr_targets_inc = true;

		if (port == cxled_to_port(cxled))
			cxld = &cxled->cxld;
		else
			cxld = cxl_region_find_decoder(port, cxlr);
		if (!cxld) {
			dev_dbg(&cxlr->dev, "%s: no decoder available\n",
				dev_name(&port->dev));
			goto out_erase;
		}

		if (cxld->region) {
			dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
				dev_name(&port->dev), dev_name(&cxld->dev),
				dev_name(&cxld->region->dev));
			rc = -EBUSY;
			goto out_erase;
		}

		cxl_rr->decoder = cxld;
	}

	rc = cxl_rr_ep_add(cxl_rr, cxled);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track endpoint %s:%s reference\n",
			dev_name(&port->dev), dev_name(&cxlmd->dev),
			dev_name(&cxld->dev));
		goto out_erase;
	}

	dev_dbg(&cxlr->dev,
		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
		dev_name(port->uport), dev_name(&port->dev),
		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
		dev_name(&cxled->cxld.dev), pos,
		ep ? ep->next ? dev_name(ep->next->uport) :
				dev_name(&cxlmd->dev) :
			"none",
		cxl_rr->nr_eps, cxl_rr->nr_targets);

	return 0;
out_erase:
	if (nr_targets_inc)
		cxl_rr->nr_targets--;
	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
	return rc;
}

static void cxl_port_detach_region(struct cxl_port *port,
				   struct cxl_region *cxlr,
				   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_ref *cxl_rr;
	struct cxl_ep *ep = NULL;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (!cxl_rr)
		return;

	/*
	 * Endpoint ports do not carry cxl_ep references, and they
	 * never target more than one endpoint by definition
	 */
	if (cxl_rr->decoder == &cxled->cxld)
		cxl_rr->nr_eps--;
	else
		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
	if (ep) {
		struct cxl_ep *ep_iter;
		unsigned long index;
		int found = 0;

		cxl_rr->nr_eps--;
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}
		if (!found)
			cxl_rr->nr_targets--;
	}

	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
}

static int check_last_peer(struct cxl_endpoint_decoder *cxled,
			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
			   int distance)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled_peer;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_memdev *cxlmd_peer;
	struct cxl_ep *ep_peer;
	int pos = cxled->pos;

	/*
	 * If this position wants to share a dport with the last endpoint
	 * mapped then that endpoint, at index 'position - distance', must
	 * also be mapped by this dport.
	 */
	if (pos < distance) {
		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
			dev_name(port->uport), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxled_peer = p->targets[pos - distance];
	cxlmd_peer = cxled_to_memdev(cxled_peer);
	ep_peer = cxl_ep_load(port, cxlmd_peer);
	if (ep->dport != ep_peer->dport) {
		dev_dbg(&cxlr->dev,
			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
			dev_name(port->uport), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
			dev_name(&cxlmd_peer->dev),
			dev_name(&cxled_peer->cxld.dev));
		return -ENXIO;
	}

	return 0;
}

static int cxl_port_setup_targets(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_switch_decoder *cxlsd;
	u16 eig, peig;
	u8 eiw, peiw;

	/*
	 * While root level decoders support x3, x6, x12, switch level
	 * decoders only support powers of 2 hierarchy.
	 */
	if (!is_power_of_2(cxl_rr->nr_targets)) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
			dev_name(port->uport), dev_name(&port->dev),
			cxl_rr->nr_targets);
		return -EINVAL;
	}

	cxlsd = to_cxl_switch_decoder(&cxld->dev);
	if (cxl_rr->nr_targets_set) {
		int i, distance;

		distance = p->nr_targets / cxl_rr->nr_targets;
		for (i = 0; i < cxl_rr->nr_targets_set; i++)
			if (ep->dport == cxlsd->target[i]) {
				rc = check_last_peer(cxled, ep, cxl_rr,
						     distance);
				if (rc)
					return rc;
				goto out_target_set;
			}
		goto add_target;
	}

	if (is_cxl_root(parent_port)) {
		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
		/*
		 * For purposes of address bit routing, use power-of-2 math for
		 * switch ports.
		 */
		if (!is_power_of_2(parent_iw))
			parent_iw /= 3;
	} else {
		struct cxl_region_ref *parent_rr;
		struct cxl_decoder *parent_cxld;

		parent_rr = cxl_rr_load(parent_port, cxlr);
		parent_cxld = parent_rr->decoder;
		parent_ig = parent_cxld->interleave_granularity;
		parent_iw = parent_cxld->interleave_ways;
	}

	rc = granularity_to_cxl(parent_ig, &peig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
			dev_name(parent_port->uport),
			dev_name(&parent_port->dev), parent_ig);
		return rc;
	}

	rc = ways_to_cxl(parent_iw, &peiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
			dev_name(parent_port->uport),
			dev_name(&parent_port->dev), parent_iw);
		return rc;
	}

	iw = cxl_rr->nr_targets;
	rc = ways_to_cxl(iw, &eiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
			dev_name(port->uport), dev_name(&port->dev), iw);
		return rc;
	}

	/*
	 * If @parent_port is masking address bits, pick the next unused address
	 * bit to route @port's targets.
	 */
	if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
		u32 address_bit = max(peig + peiw, eiw + peig);

		eig = address_bit - eiw + 1;
	} else {
		eiw = peiw;
		eig = peig;
	}

	rc = cxl_to_granularity(eig, &ig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
			dev_name(port->uport), dev_name(&port->dev),
			256 << eig);
		return rc;
	}

	cxld->interleave_ways = iw;
	cxld->interleave_granularity = ig;
	cxld->hpa_range = (struct range) {
		.start = p->res->start,
		.end = p->res->end,
	};
	dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
		dev_name(&port->dev), iw, ig);
add_target:
	if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s: targets full trying to add %s:%s at %d\n",
			dev_name(port->uport), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
	inc = 1;
out_target_set:
	cxl_rr->nr_targets_set += inc;
	dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
		dev_name(port->uport), dev_name(&port->dev),
		cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);

	return 0;
}

static void cxl_port_reset_targets(struct cxl_port *port,
				   struct cxl_region *cxlr)
{
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_decoder *cxld;

	/*
	 * After the last endpoint has been detached the entire cxl_rr may now
	 * be gone.
	 */
	if (!cxl_rr)
		return;
	cxl_rr->nr_targets_set = 0;

	cxld = cxl_rr->decoder;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};
}

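/*
 * Walk every target's path from the root-most port down to the endpoint,
 * clearing the target maps programmed for @cxlr along the way.
 */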
static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;
	int i;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
			cxl_port_reset_targets(iter, cxlr);
	}
}

static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;
	int i, rc;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		/*
		 * Descend the topology tree programming targets while
		 * looking for conflicts.
		 */
		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			rc = cxl_port_setup_targets(iter, cxlr, cxled);
			if (rc) {
				cxl_region_teardown_targets(cxlr);
				return rc;
			}
		}
	}

	return 0;
}

static int cxl_region_attach(struct cxl_region *cxlr,
			     struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *ep_port, *root_port, *iter;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_dport *dport;
	int i, rc = -ENXIO;

	if (cxled->mode == CXL_DECODER_DEAD) {
		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
		return -ENODEV;
	}

	/* all full of members, or interleave config not established? */
	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "region already active\n");
		return -EBUSY;
	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "interleave config missing\n");
		return -ENXIO;
	}

	if (pos < 0 || pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		return -ENXIO;
	}

	if (p->targets[pos] == cxled)
		return 0;

	if (p->targets[pos]) {
		struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
		struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);

		dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
			pos, dev_name(&cxlmd_target->dev),
			dev_name(&cxled_target->cxld.dev));
		return -EBUSY;
	}

	for (i = 0; i < p->interleave_ways; i++) {
		struct cxl_endpoint_decoder *cxled_target;
		struct cxl_memdev *cxlmd_target;

		cxled_target = p->targets[i];
		if (!cxled_target)
			continue;

		cxlmd_target = cxled_to_memdev(cxled_target);
		if (cxlmd_target == cxlmd) {
			dev_dbg(&cxlr->dev,
				"%s already specified at position %d via: %s\n",
				dev_name(&cxlmd->dev), pos,
				dev_name(&cxled_target->cxld.dev));
			return -EBUSY;
		}
	}

	ep_port = cxled_to_port(cxled);
	root_port = cxlrd_to_port(cxlrd);
	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
	if (!dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(cxlr->dev.parent));
		return -ENXIO;
	}

	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(&cxlrd->cxlsd.cxld.dev));
		return -ENXIO;
	}

	if (cxled->cxld.target_type != cxlr->type) {
		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			cxled->cxld.target_type, cxlr->type);
		return -ENXIO;
	}

	if (!cxled->dpa_res) {
		dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
	    resource_size(p->res)) {
		dev_dbg(&cxlr->dev,
			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
			(u64)resource_size(p->res));
		return -EINVAL;
	}

	for (iter = ep_port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent)) {
		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
		if (rc)
			goto err;
	}

	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	if (p->nr_targets == p->interleave_ways) {
		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			goto err_decrement;
		p->state = CXL_CONFIG_ACTIVE;
	}

	cxled->cxld.interleave_ways = p->interleave_ways;
	cxled->cxld.interleave_granularity = p->interleave_granularity;
	cxled->cxld.hpa_range = (struct range) {
		.start = p->res->start,
		.end = p->res->end,
	};

	return 0;

err_decrement:
	p->nr_targets--;
err:
	for (iter = ep_port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);
	return rc;
}

static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
	struct cxl_region *cxlr = cxled->cxld.region;
	struct cxl_region_params *p;
	int rc = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!cxlr)
		return 0;

	p = &cxlr->params;
	get_device(&cxlr->dev);

	if (p->state > CXL_CONFIG_ACTIVE) {
		/*
		 * TODO: tear down all impacted regions if a device is
		 * removed out of order
		 */
		rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
		if (rc)
			goto out;
		p->state = CXL_CONFIG_ACTIVE;
	}

	for (iter = ep_port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);

	if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
	    p->targets[cxled->pos] != cxled) {
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);

		dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
			      dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			      cxled->pos);
		goto out;
	}

	if (p->state == CXL_CONFIG_ACTIVE) {
		p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
		cxl_region_teardown_targets(cxlr);
	}
	p->targets[cxled->pos] = NULL;
	p->nr_targets--;
	cxled->cxld.hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	/* notify the region driver that one of its targets has departed */
	up_write(&cxl_region_rwsem);
	device_release_driver(&cxlr->dev);
	down_write(&cxl_region_rwsem);
out:
	put_device(&cxlr->dev);
	return rc;
}

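/*
 * Called when an endpoint decoder is about to go away: mark it dead so no
 * new region can attach, and force its departure from any current region.
 */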
void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
	down_write(&cxl_region_rwsem);
	cxled->mode = CXL_DECODER_DEAD;
	cxl_region_detach(cxled);
	up_write(&cxl_region_rwsem);
}

static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
{
	struct device *dev;
	int rc;

	dev = bus_find_device_by_name(&cxl_bus_type, NULL, decoder);
	if (!dev)
		return -ENODEV;

	if (!is_endpoint_decoder(dev)) {
		put_device(dev);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		goto out;
	down_read(&cxl_dpa_rwsem);
	rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
	up_read(&cxl_dpa_rwsem);
	up_write(&cxl_region_rwsem);
out:
	put_device(dev);
	return rc;
}

static int detach_target(struct cxl_region *cxlr, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	if (!p->targets[pos]) {
		rc = 0;
		goto out;
	}

	rc = cxl_region_detach(p->targets[pos]);
out:
	up_write(&cxl_region_rwsem);
	return rc;
}

static ssize_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
			     size_t len)
{
	int rc;

	if (sysfs_streq(buf, "\n"))
		rc = detach_target(cxlr, pos);
	else
		rc = attach_target(cxlr, buf, pos);

	if (rc < 0)
		return rc;
	return len;
}

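/*
 * Stamp out targetN sysfs attributes; cxl_region_target_visible() hides
 * the positions at or beyond the region's interleave_ways setting.
 */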
#define TARGET_ATTR_RW(n)                                              \
static ssize_t target##n##_show(                                       \
	struct device *dev, struct device_attribute *attr, char *buf)  \
{                                                                      \
	return show_targetN(to_cxl_region(dev), buf, (n));             \
}                                                                      \
static ssize_t target##n##_store(struct device *dev,                   \
				 struct device_attribute *attr,        \
				 const char *buf, size_t len)          \
{                                                                      \
	return store_targetN(to_cxl_region(dev), buf, (n), len);       \
}                                                                      \
static DEVICE_ATTR_RW(target##n)

TARGET_ATTR_RW(0);
TARGET_ATTR_RW(1);
TARGET_ATTR_RW(2);
TARGET_ATTR_RW(3);
TARGET_ATTR_RW(4);
TARGET_ATTR_RW(5);
TARGET_ATTR_RW(6);
TARGET_ATTR_RW(7);
TARGET_ATTR_RW(8);
TARGET_ATTR_RW(9);
TARGET_ATTR_RW(10);
TARGET_ATTR_RW(11);
TARGET_ATTR_RW(12);
TARGET_ATTR_RW(13);
TARGET_ATTR_RW(14);
TARGET_ATTR_RW(15);

static struct attribute *target_attrs[] = {
	&dev_attr_target0.attr,
	&dev_attr_target1.attr,
	&dev_attr_target2.attr,
	&dev_attr_target3.attr,
	&dev_attr_target4.attr,
	&dev_attr_target5.attr,
	&dev_attr_target6.attr,
	&dev_attr_target7.attr,
	&dev_attr_target8.attr,
	&dev_attr_target9.attr,
	&dev_attr_target10.attr,
	&dev_attr_target11.attr,
	&dev_attr_target12.attr,
	&dev_attr_target13.attr,
	&dev_attr_target14.attr,
	&dev_attr_target15.attr,
	NULL,
};

static umode_t cxl_region_target_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;

	if (n < p->interleave_ways)
		return a->mode;
	return 0;
}

static const struct attribute_group cxl_region_target_group = {
	.attrs = target_attrs,
	.is_visible = cxl_region_target_visible,
};

static const struct attribute_group *get_cxl_region_target_group(void)
{
	return &cxl_region_target_group;
}

static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	&cxl_region_target_group,
	NULL,
};

static void cxl_region_release(struct device *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	memregion_free(cxlr->id);
	kfree(cxlr);
}

const struct device_type cxl_region_type = {
	.name = "cxl_region",
	.release = cxl_region_release,
	.groups = region_groups
};

bool is_cxl_region(struct device *dev)
{
	return dev->type == &cxl_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);

static struct cxl_region *to_cxl_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
			  "not a cxl_region device\n"))
		return NULL;

	return container_of(dev, struct cxl_region, dev);
}

static void unregister_region(void *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	device_del(dev);
	cxl_region_iomem_release(cxlr);
	put_device(dev);
}

static struct lock_class_key cxl_region_key;

static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
{
	struct cxl_region *cxlr;
	struct device *dev;

	cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
	if (!cxlr) {
		memregion_free(id);
		return ERR_PTR(-ENOMEM);
	}

	dev = &cxlr->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_region_key);
	dev->parent = &cxlrd->cxlsd.cxld.dev;
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_region_type;
	cxlr->id = id;

	return cxlr;
}

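/**
 * devm_cxl_add_region - Adds a region to a port
 * @cxlrd: root decoder
 * @id: memregion id to create, or memregion_free() on failure
 * @mode: mode for the endpoint decoders of this region
 * @type: select whether this is an expander or accelerator (type-2 or type-3)
 *
 * This is the second step of region initialization. Regions exist within an
 * address space which is mapped by a @cxlrd.
 *
 * Return: 0 if the region was added to the @cxlrd, else returns negative error
 * code. The region will be named "regionZ" where Z is the unique region number.
 */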
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
					      int id,
					      enum cxl_decoder_mode mode,
					      enum cxl_decoder_type type)
{
	struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
	struct cxl_region *cxlr;
	struct device *dev;
	int rc;

	cxlr = cxl_region_alloc(cxlrd, id);
	if (IS_ERR(cxlr))
		return cxlr;
	cxlr->mode = mode;
	cxlr->type = type;

	dev = &cxlr->dev;
	rc = dev_set_name(dev, "region%d", id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
	if (rc)
		return ERR_PTR(rc);

	dev_dbg(port->uport, "%s: created %s\n",
		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
	return cxlr;

err:
	put_device(dev);
	return ERR_PTR(rc);
}

static ssize_t create_pmem_region_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
}

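/*
 * Example flow (paths are illustrative): reading create_pmem_region
 * advertises the next region name, and writing that name back atomically
 * claims it:
 *
 *   # cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *   region0
 *   # echo region0 > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 */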
static ssize_t create_pmem_region_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int id, rc;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0)
		return rc;

	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
		memregion_free(rc);
		return -EBUSY;
	}

	cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
				   CXL_DECODER_EXPANDER);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_pmem_region);

static ssize_t region_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (cxld->region)
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
	else
		rc = sysfs_emit(buf, "\n");
	up_read(&cxl_region_rwsem);

	return rc;
}
DEVICE_ATTR_RO(region);

static struct cxl_region *
cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *region_dev;

	region_dev = device_find_child_by_name(&cxld->dev, name);
	if (!region_dev)
		return ERR_PTR(-ENODEV);

	return to_cxl_region(region_dev);
}

static ssize_t delete_region_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_region *cxlr;

	cxlr = cxl_find_region_by_name(cxlrd, buf);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	devm_release_action(port->uport, unregister_region, cxlr);
	put_device(&cxlr->dev);

	return len;
}
DEVICE_ATTR_WO(delete_region);

static void cxl_pmem_region_release(struct device *dev)
{
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	int i;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;

		put_device(&cxlmd->dev);
	}

	kfree(cxlr_pmem);
}

static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};

bool is_cxl_pmem_region(struct device *dev)
{
	return dev->type == &cxl_pmem_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);

struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
			  "not a cxl_pmem_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_pmem_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);

static struct lock_class_key cxl_pmem_region_key;

static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_pmem_region *cxlr_pmem;
	struct device *dev;
	int i;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_pmem = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
			    GFP_KERNEL);
	if (!cxlr_pmem) {
		cxlr_pmem = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_pmem->hpa_range.start = p->res->start;
	cxlr_pmem->hpa_range.end = p->res->end;

	/* Snapshot the region configuration underneath the cxl_region_rwsem */
	cxlr_pmem->nr_mappings = p->nr_targets;
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];

		m->cxlmd = cxlmd;
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}

	dev = &cxlr_pmem->dev;
	cxlr_pmem->cxlr = cxlr;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_pmem;
}

static void cxlr_pmem_unregister(void *dev)
{
	device_unregister(dev);
}

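/**
 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
 * @cxlr: parent CXL region for this pmem region bridge device
 *
 * Return: 0 on success negative error code on failure.
 */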
static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
	struct cxl_pmem_region *cxlr_pmem;
	struct device *dev;
	int rc;

	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
	if (IS_ERR(cxlr_pmem))
		return PTR_ERR(cxlr_pmem);

	dev = &cxlr_pmem->dev;
	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev);

err:
	put_device(dev);
	return rc;
}

static int cxl_region_probe(struct device *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc) {
		dev_dbg(&cxlr->dev, "probe interrupted\n");
		return rc;
	}

	if (p->state < CXL_CONFIG_COMMIT) {
		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
		rc = -ENXIO;
	}

	/*
	 * From this point on any path that changes the region's state away from
	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
	 */
	up_read(&cxl_region_rwsem);

	if (rc)
		return rc;

	switch (cxlr->mode) {
	case CXL_DECODER_PMEM:
		return devm_cxl_add_pmem_region(cxlr);
	default:
		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
			cxlr->mode);
		return -ENXIO;
	}
}

static struct cxl_driver cxl_region_driver = {
	.name = "cxl_region",
	.probe = cxl_region_probe,
	.id = CXL_DEVICE_REGION,
};

int cxl_region_init(void)
{
	return cxl_driver_register(&cxl_region_driver);
}

void cxl_region_exit(void)
{
	cxl_driver_unregister(&cxl_region_driver);
}

MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);