// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"

static bool cxl_raw_allow_all;

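/*
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox: the
 * command definition table, validation and dispatch of commands submitted
 * through the cxl_mem.h IOCTL UAPI, and helpers used to enumerate and
 * identify a device at probe time.
 */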
#define cxl_for_each_cmd(cmd) \
	for ((cmd) = &cxl_mem_commands[0]; \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags) \
	[CXL_MEM_COMMAND_ID_##_id] = { \
		.info = { \
			.id = CXL_MEM_COMMAND_ID_##_id, \
			.size_in = sin, \
			.size_out = sout, \
		}, \
		.opcode = CXL_MBOX_OP_##_id, \
		.flags = _flags, \
	}

#define CXL_VARIABLE_PAYLOAD	~0U

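/*
 * This table defines the supported mailbox commands for the driver. The fixed
 * size_in/size_out values are validated against the user's input in
 * cxl_to_mem_cmd(); CXL_VARIABLE_PAYLOAD marks a payload whose length is not
 * known in advance.
 */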
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};

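/*
 * Opcodes that the RAW command path refuses to pass through. Each of these
 * either changes device state behind the driver's back (firmware activation,
 * partitioning, label storage, shutdown state) or starts a background
 * operation (media scan) whose progress the driver could not track if it were
 * issued raw.
 */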
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};

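/*
 * Command sets that RAW doesn't permit. All opcodes in these sets are
 * disabled because they pass plaintext security payloads over the
 * user/kernel boundary.
 */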
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

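/**
 * cxl_mbox_send_cmd() - Send a mailbox command to a device.
 * @cxlds: The device data for the operation
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload
 * @out: Caller allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Return:
 *  * %0	- Command completed successfully.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EIO	- Output payload did not match the expected fixed size.
 *  * Other negative errnos as reported by the low-level mbox_send()
 *    implementation or translated from the device's return code by
 *    cxl_mbox_cmd_rc2errno().
 */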
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
		      size_t in_size, void *out, size_t out_size)
{
	const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = opcode,
		.payload_in = in,
		.size_in = in_size,
		.size_out = out_size,
		.payload_out = out,
	};
	int rc;

	if (out_size > cxlds->payload_size)
		return -E2BIG;

	rc = cxlds->mbox_send(cxlds, &mbox_cmd);
	if (rc)
		return rc;

	if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
		return cxl_mbox_cmd_rc2errno(&mbox_cmd);

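	/*
	 * Variable sized output payloads cannot be validated here, so it is
	 * up to the caller to check mbox_cmd.size_out if it cares.
	 */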
	if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) {
		if (mbox_cmd.size_out != out_size)
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

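/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox command
 * from user space to the device, in order to reject commands with input
 * payloads that are known to be unsafe (e.g. an immediate partition change).
 */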
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	default:
		break;
	}
	return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
			     struct cxl_dev_state *cxlds, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						in_size);
		if (IS_ERR(mbox->payload_in))
			return PTR_ERR(mbox->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
			dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox->size_out = cxlds->payload_size;
	else
		mbox->size_out = out_size;

	if (mbox->size_out) {
		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
		if (!mbox->payload_out) {
			kvfree(mbox->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_dev_state *cxlds)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

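	/*
	 * For RAW commands the output size is not known from the command
	 * table, so the user-supplied size must be bounded by the mailbox
	 * payload size here.
	 */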
	if (send_cmd->out.size > cxlds->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_dev_state *cxlds)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxlds->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxlds->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}

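/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxlds: The device data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that
 * is safe to send to the hardware.
 */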
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_dev_state *cxlds,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

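	/*
	 * The user can never specify an input payload larger than what the
	 * hardware supports; output sizing is validated per command below.
	 */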
	if (send_cmd->in.size > cxlds->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* Return the total number of commands if none were requested */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

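	/*
	 * Otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */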
	cxl_for_each_cmd(cmd) {
		const struct cxl_command_info *info = &cmd->info;

		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}

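/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlds: The device data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation
 *		  itself was successful.
 *  * %-EFAULT	- Something happened with copy_to_user().
 *  * Other negative errnos - Transaction-level failures from mbox_send().
 *
 * Dispatches a mailbox command on behalf of a userspace request. On success
 * the output payload is copied to userspace. See cxl_send_cmd().
 */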
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = cxlds->dev;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxlds->mbox_send(cxlds, mbox_cmd);
	if (rc)
		goto out;

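	/*
	 * @size_out contains the max size that's allowed to be written back
	 * out to userspace. While the mailbox command was run, the output
	 * buffer's length may have been modified by hardware to indicate the
	 * actual length of the data written to the payload buffer.
	 */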
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlds, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}

static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
	u32 remaining = size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
		struct cxl_mbox_get_log log = {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size)
		};
		int rc;

		rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
				       out, xfer_size);
		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	return 0;
}

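/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlds: The device data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */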
static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

		if (!cmd) {
			dev_dbg(cxlds->dev,
				"Opcode 0x%04x unsupported by driver", opcode);
			continue;
		}

		set_bit(cmd->info.id, cxlds->enabled_cmds);
	}
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *ret;
	int rc;

	ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
			       cxlds->payload_size);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* Log identifiers (UUIDs) used in the Get Log input payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

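/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @cxlds: The device data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in @cxlds.
 */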
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = cxlds->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlds, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlds->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}

out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

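/**
 * cxl_mem_get_partition_info - Get partition info
 * @cxlds: The device data for the operation
 *
 * Retrieve the current partition info for the device. The 'active' values are
 * the current capacities in bytes; the 'next' values are the pending
 * capacities, in bytes, that have been requested but not yet applied.
 *
 * Return: 0 on success, or the result of the mailbox command.
 */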
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_partition_info pi;
	int rc;

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
			       &pi, sizeof(pi));
	if (rc)
		return rc;

	cxlds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}

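/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @cxlds: The device to identify
 *
 * Return: 0 if identify was executed successfully.
 *
 * On success this populates the capacity, partition alignment, LSA size, and
 * firmware version fields of @cxlds from the device's response.
 */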
int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
	/* See the CXL 2.0 "Identify Memory Device" output payload definition */
	struct cxl_mbox_identify id;
	int rc;

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
			       sizeof(id));
	if (rc < 0)
		return rc;

	cxlds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	cxlds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	res->name = type;
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}

int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	int rc;

	cxlds->dpa_res =
		(struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);

	if (cxlds->partition_align_bytes == 0) {
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
				 cxlds->volatile_only_bytes, "ram");
		if (rc)
			return rc;
		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
				   cxlds->volatile_only_bytes,
				   cxlds->persistent_only_bytes, "pmem");
	}

	rc = cxl_mem_get_partition_info(cxlds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
			 cxlds->active_volatile_bytes, "ram");
	if (rc)
		return rc;
	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
			   cxlds->active_volatile_bytes,
			   cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);

struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
	struct cxl_dev_state *cxlds;

	cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
	if (!cxlds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&cxlds->mbox_mutex);
	cxlds->dev = dev;

	return cxlds;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}