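/*
 * sysfs interface for the Intel(R) IDXD (DSA/IAX) driver: exposes device,
 * group, engine and work queue configuration attributes.
 */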
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

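/* Strings reported by the wq "type" attribute, indexed by enum idxd_wq_type. */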
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

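/* Engine attributes */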
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

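/* Writing -1 detaches the engine from its group. */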
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

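/* Group attributes */

/*
 * Recompute the number of device read buffers still available after
 * subtracting every group's reservation.
 */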
static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}

static ssize_t group_read_buffers_reserved_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_show(dev, attr, buf);
}

static ssize_t group_read_buffers_reserved_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static struct device_attribute dev_attr_group_read_buffers_reserved =
		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
		       group_read_buffers_reserved_store);

static ssize_t group_read_buffers_allowed_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_show(dev, attr, buf);
}

static ssize_t group_read_buffers_allowed_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static struct device_attribute dev_attr_group_read_buffers_allowed =
		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
		       group_read_buffers_allowed_store);

static ssize_t group_use_read_buffer_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_show(dev, attr, buf);
}

static ssize_t group_use_read_buffer_limit_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static struct device_attribute dev_attr_group_use_read_buffer_limit =
		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
		       group_use_read_buffer_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

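	/* Overwrite the trailing space of the last entry with a newline. */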
	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

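	/* Overwrite the trailing space of the last entry with a newline. */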
	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_use_read_buffer_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_read_buffers_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_read_buffers_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

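/* WQ attributes */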
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

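/* Writing -1 detaches the wq from its group. */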
static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared")) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

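/* Sum of the sizes currently claimed by all wqs on the device. */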
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val == 0 || val > wq->size)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

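	/* If we are changing the wq type, clear the name. */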
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

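	/*
	 * Naming a kernel (dmaengine) wq is not supported while device
	 * PASID is enabled, pending SVM support for dmaengine.
	 */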
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	sprintf(wq->name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

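/*
 * Parse a u64 from sysfs input, reject zero, and round the value up to
 * the next power of two.
 */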
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

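/* Report the wq's current occupancy from the WQCFG register. */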
static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static ssize_t wq_enqcmds_retries_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
}

static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int rc;
	unsigned int retries;

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

static struct device_attribute dev_attr_wq_enqcmds_retries =
		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

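/* IDXD device attrs */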
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock(&idxd->dev_lock);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_read_buffers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
	return max_read_buffers_show(dev, attr, buf);
}

static DEVICE_ATTR_RO(max_tokens);
static DEVICE_ATTR_RO(max_read_buffers);

static ssize_t read_buffer_limit_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_show(dev, attr, buf);
}

static ssize_t read_buffer_limit_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_store(dev, attr, buf, count);
}

static DEVICE_ATTR_RW(token_limit);
static DEVICE_ATTR_RW(read_buffer_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

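/* Writing any value clears the saved command status. */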
static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;
	return count;
}
static DEVICE_ATTR_RW(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_max_read_buffers.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

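/*
 * The registration helpers below share a cleanup pattern: on failure,
 * put_device() every confdev that was never device_add()'ed (including
 * the one that just failed) and device_unregister() the ones that were.
 */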
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}