// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                                  \
    for ((cmd) = &cxl_mem_commands[0];                                     \
         ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                        \
    [CXL_MEM_COMMAND_ID_##_id] = {                                         \
    .info = {                                                              \
            .id = CXL_MEM_COMMAND_ID_##_id,                        \
            .size_in = sin,                                        \
            .size_out = sout,                                      \
        },                                                             \
    .opcode = CXL_MBOX_OP_##_id,                                           \
    .flags = _flags,                                                       \
    }
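/*
 * For reference, an entry like CXL_CMD(IDENTIFY, 0, 0x43,
 * CXL_CMD_FLAG_FORCE_ENABLE) in the table below expands to roughly:
 *
 *    [CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *        .info = {
 *            .id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *            .size_in = 0,
 *            .size_out = 0x43,
 *        },
 *        .opcode = CXL_MBOX_OP_IDENTIFY,
 *        .flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *    },
 */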

#define CXL_VARIABLE_PAYLOAD    ~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Values in the table other than
 * CXL_VARIABLE_PAYLOAD will be validated against the user's input. For
 * example, if size_in is 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
    CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
    CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
    CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
    CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
    CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
    CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
    CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
    CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
    CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
    CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
    CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
    CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
    CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
    CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
    CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
    CXL_CMD(INJECT_POISON, 0x8, 0, 0),
    CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
    CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
    CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
    CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, and userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
    CXL_MBOX_OP_ACTIVATE_FW,
    CXL_MBOX_OP_SET_PARTITION_INFO,
    CXL_MBOX_OP_SET_LSA,
    CXL_MBOX_OP_SET_SHUTDOWN_STATE,
    CXL_MBOX_OP_SCAN_MEDIA,
    CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
    0x44, /* Sanitize */
    0x45, /* Persistent Memory Data-at-rest Security */
    0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
        if (security_command_sets[i] == (opcode >> 8))
            return true;
    return false;
}
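
/*
 * Example: mailbox opcodes carry the command set in their upper byte, so an
 * opcode of the form 0x44XX has (opcode >> 8) == 0x44, matches the Sanitize
 * entry in security_command_sets[], and is therefore treated as a security
 * command by cxl_is_security_command().
 */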

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
    struct cxl_mem_command *c;

    cxl_for_each_cmd(c)
        if (c->opcode == opcode)
            return c;

    return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
    struct cxl_mem_command *c;

    c = cxl_mem_find_command(opcode);
    if (!c)
        return NULL;

    return cxl_command_names[c->info.id].name;
}

/**
 * cxl_mbox_send_cmd() - Send a mailbox command to a device.
 * @cxlds: The device data for the operation
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload
 * @out: Caller allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Context: Any context.
 * Return:
 *  * %0    - Success.
 *  * %-E2BIG   - Payload is too large for hardware.
 *  * %-EBUSY   - Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT  - Hardware error occurred.
 *  * %-ENXIO   - Command completed, but device reported an error.
 *  * %-EIO - Unexpected output size.
 *
 * Mailbox commands may execute successfully at the transport level even
 * though the device itself reported an error. While this distinction can be
 * useful for commands from userspace, the kernel will only be able to use
 * results when both are successful.
 */
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
              size_t in_size, void *out, size_t out_size)
{
    const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
    struct cxl_mbox_cmd mbox_cmd = {
        .opcode = opcode,
        .payload_in = in,
        .size_in = in_size,
        .size_out = out_size,
        .payload_out = out,
    };
    int rc;

    if (out_size > cxlds->payload_size)
        return -E2BIG;

    rc = cxlds->mbox_send(cxlds, &mbox_cmd);
    if (rc)
        return rc;

    if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
        return cxl_mbox_cmd_rc2errno(&mbox_cmd);

    /*
     * Variable sized commands can't be validated and so it's up to the
     * caller to do that if they wish.
     */
    if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) {
        if (mbox_cmd.size_out != out_size)
            return -EIO;
    }
    return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);
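
/*
 * Illustrative in-kernel usage of cxl_mbox_send_cmd() with a fixed-size
 * output payload, modeled on cxl_dev_state_identify() below. Because
 * IDENTIFY has a fixed size_out in cxl_mem_commands[], a reply of an
 * unexpected size is reported as -EIO rather than silently accepted:
 *
 *    struct cxl_mbox_identify id;
 *    int rc;
 *
 *    rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0,
 *                           &id, sizeof(id));
 *    if (rc < 0)
 *        return rc;
 */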

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
    int i;

    if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
        return false;

    if (security_locked_down(LOCKDOWN_PCI_ACCESS))
        return false;

    if (cxl_raw_allow_all)
        return true;

    if (cxl_is_security_command(opcode))
        return false;

    for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
        if (cxl_disabled_raw_commands[i] == opcode)
            return false;

    return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true  - payload_in passes check for @opcode.
 *  * false - payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
    switch (opcode) {
    case CXL_MBOX_OP_SET_PARTITION_INFO: {
        struct cxl_mbox_set_partition_info *pi = payload_in;

        if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
            return false;
        break;
    }
    default:
        break;
    }
    return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
                 struct cxl_dev_state *cxlds, u16 opcode,
                 size_t in_size, size_t out_size, u64 in_payload)
{
    *mbox = (struct cxl_mbox_cmd) {
        .opcode = opcode,
        .size_in = in_size,
    };

    if (in_size) {
        mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
                        in_size);
        if (IS_ERR(mbox->payload_in))
            return PTR_ERR(mbox->payload_in);

        if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
            dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
                cxl_mem_opcode_to_name(opcode));
            kvfree(mbox->payload_in);
            return -EBUSY;
        }
    }

    /* Prepare to handle a full payload for variable sized output */
    if (out_size == CXL_VARIABLE_PAYLOAD)
        mbox->size_out = cxlds->payload_size;
    else
        mbox->size_out = out_size;

    if (mbox->size_out) {
        mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
        if (!mbox->payload_out) {
            kvfree(mbox->payload_in);
            return -ENOMEM;
        }
    }
    return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
    kvfree(mbox->payload_in);
    kvfree(mbox->payload_out);
}

0300 
0301 static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
0302                   const struct cxl_send_command *send_cmd,
0303                   struct cxl_dev_state *cxlds)
0304 {
0305     if (send_cmd->raw.rsvd)
0306         return -EINVAL;
0307 
0308     /*
0309      * Unlike supported commands, the output size of RAW commands
0310      * gets passed along without further checking, so it must be
0311      * validated here.
0312      */
0313     if (send_cmd->out.size > cxlds->payload_size)
0314         return -EINVAL;
0315 
0316     if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
0317         return -EPERM;
0318 
0319     dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");
0320 
0321     *mem_cmd = (struct cxl_mem_command) {
0322         .info = {
0323             .id = CXL_MEM_COMMAND_ID_RAW,
0324             .size_in = send_cmd->in.size,
0325             .size_out = send_cmd->out.size,
0326         },
0327         .opcode = send_cmd->raw.opcode
0328     };
0329 
0330     return 0;
0331 }
0332 
0333 static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
0334               const struct cxl_send_command *send_cmd,
0335               struct cxl_dev_state *cxlds)
0336 {
0337     struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
0338     const struct cxl_command_info *info = &c->info;
0339 
0340     if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
0341         return -EINVAL;
0342 
0343     if (send_cmd->rsvd)
0344         return -EINVAL;
0345 
0346     if (send_cmd->in.rsvd || send_cmd->out.rsvd)
0347         return -EINVAL;
0348 
0349     /* Check that the command is enabled for hardware */
0350     if (!test_bit(info->id, cxlds->enabled_cmds))
0351         return -ENOTTY;
0352 
0353     /* Check that the command is not claimed for exclusive kernel use */
0354     if (test_bit(info->id, cxlds->exclusive_cmds))
0355         return -EBUSY;
0356 
0357     /* Check the input buffer is the expected size */
0358     if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
0359         (info->size_in != send_cmd->in.size))
0360         return -ENOMEM;
0361 
0362     /* Check the output buffer is at least large enough */
0363     if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
0364         (send_cmd->out.size < info->size_out))
0365         return -ENOMEM;
0366 
0367     *mem_cmd = (struct cxl_mem_command) {
0368         .info = {
0369             .id = info->id,
0370             .flags = info->flags,
0371             .size_in = send_cmd->in.size,
0372             .size_out = send_cmd->out.size,
0373         },
0374         .opcode = c->opcode
0375     };
0376 
0377     return 0;
0378 }
0379 
/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxlds: The device data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0    - @mbox_cmd is ready to send.
 *  * %-ENOTTY  - Invalid command specified.
 *  * %-EINVAL  - Reserved fields or invalid values were used.
 *  * %-ENOMEM  - Input or output buffer wasn't sized properly.
 *  * %-EPERM   - Attempted to use a protected command.
 *  * %-EBUSY   - Kernel has claimed exclusive access to this opcode
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
                      struct cxl_dev_state *cxlds,
                      const struct cxl_send_command *send_cmd)
{
    struct cxl_mem_command mem_cmd;
    int rc;

    if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
        return -ENOTTY;

    /*
     * The user can never specify an input payload larger than what hardware
     * supports, but output can be arbitrarily large (simply write out as
     * much data as the hardware provides).
     */
    if (send_cmd->in.size > cxlds->payload_size)
        return -EINVAL;

    /* Sanitize and construct a cxl_mem_command */
    if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
        rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
    else
        rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);

    if (rc)
        return rc;

    /* Sanitize and construct a cxl_mbox_cmd */
    return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
                 mem_cmd.info.size_in, mem_cmd.info.size_out,
                 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
          struct cxl_mem_query_commands __user *q)
{
    struct device *dev = &cxlmd->dev;
    struct cxl_mem_command *cmd;
    u32 n_commands;
    int j = 0;

    dev_dbg(dev, "Query IOCTL\n");

    if (get_user(n_commands, &q->n_commands))
        return -EFAULT;

    /* Return the total number of commands if 0 elements are requested. */
    if (n_commands == 0)
        return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

    /*
     * Otherwise, return min(n_commands, total commands) cxl_command_info
     * structures.
     */
    cxl_for_each_cmd(cmd) {
        const struct cxl_command_info *info = &cmd->info;

        if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
            return -EFAULT;

        if (j == n_commands)
            break;
    }

    return 0;
}
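
/*
 * Userspace sketch of the query flow above (struct and ioctl names are
 * assumed to follow include/uapi/linux/cxl_mem.h): call once with
 * n_commands == 0 to learn the total, then again with a sized buffer.
 *
 *    struct cxl_mem_query_commands probe = { .n_commands = 0 };
 *    struct cxl_mem_query_commands *q;
 *
 *    if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe) < 0)
 *        return -1;
 *    q = calloc(1, sizeof(*q) +
 *               probe.n_commands * sizeof(q->commands[0]));
 *    q->n_commands = probe.n_commands;
 *    if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, q) < 0)
 *        return -1;
 */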

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlds: The device data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0    - Mailbox transaction succeeded. This implies the mailbox
 *        protocol completed successfully, not that the operation itself
 *        was successful.
 *  * %-ENOMEM  - Couldn't allocate a bounce buffer.
 *  * %-EFAULT  - Something happened with copy_to/from_user.
 *  * %-EINTR   - Mailbox acquisition interrupted.
 *  * %-EXXX    - Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
                    struct cxl_mbox_cmd *mbox_cmd,
                    u64 out_payload, s32 *size_out,
                    u32 *retval)
{
    struct device *dev = cxlds->dev;
    int rc;

    dev_dbg(dev,
        "Submitting %s command for user\n"
        "\topcode: %x\n"
        "\tsize: %zx\n",
        cxl_mem_opcode_to_name(mbox_cmd->opcode),
        mbox_cmd->opcode, mbox_cmd->size_in);

    rc = cxlds->mbox_send(cxlds, mbox_cmd);
    if (rc)
        goto out;

    /*
     * @size_out contains the max size that's allowed to be written back out
     * to userspace. While the command may have produced more output than
     * this, the excess will have to be ignored.
     */
    if (mbox_cmd->size_out) {
        dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
                  "Invalid return size\n");
        if (copy_to_user(u64_to_user_ptr(out_payload),
                 mbox_cmd->payload_out, mbox_cmd->size_out)) {
            rc = -EFAULT;
            goto out;
        }
    }

    *size_out = mbox_cmd->size_out;
    *retval = mbox_cmd->return_code;

out:
    cxl_mbox_cmd_dtor(mbox_cmd);
    return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    struct device *dev = &cxlmd->dev;
    struct cxl_send_command send;
    struct cxl_mbox_cmd mbox_cmd;
    int rc;

    dev_dbg(dev, "Send IOCTL\n");

    if (copy_from_user(&send, s, sizeof(send)))
        return -EFAULT;

    rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send);
    if (rc)
        return rc;

    rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
                      &send.out.size, &send.retval);
    if (rc)
        return rc;

    if (copy_to_user(s, &send, sizeof(send)))
        return -EFAULT;

    return 0;
}
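
/*
 * Userspace sketch of the send flow (field names assumed to follow struct
 * cxl_send_command in include/uapi/linux/cxl_mem.h): payloads are passed as
 * u64-encoded user pointers, and the kernel writes back the actual output
 * size and the device return code.
 *
 *    unsigned char out[0x43];
 *    struct cxl_send_command cmd = {
 *        .id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *        .out.size = sizeof(out),
 *        .out.payload = (__u64)(uintptr_t)out,
 *    };
 *
 *    if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) < 0)
 *        return -1;
 *
 * On return, cmd.retval holds the device status and cmd.out.size the
 * number of bytes written to out[].
 */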

static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
    u32 remaining = size;
    u32 offset = 0;

    while (remaining) {
        u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
        struct cxl_mbox_get_log log = {
            .uuid = *uuid,
            .offset = cpu_to_le32(offset),
            .length = cpu_to_le32(xfer_size)
        };
        int rc;

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
                       out, xfer_size);
        if (rc < 0)
            return rc;

        out += xfer_size;
        remaining -= xfer_size;
        offset += xfer_size;
    }

    return 0;
}


/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlds: The device data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: Buffer containing the Command Effects Log.
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
    struct cxl_cel_entry *cel_entry;
    const int cel_entries = size / sizeof(*cel_entry);
    int i;

    cel_entry = (struct cxl_cel_entry *) cel;

    for (i = 0; i < cel_entries; i++) {
        u16 opcode = le16_to_cpu(cel_entry[i].opcode);
        struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

        if (!cmd) {
            dev_dbg(cxlds->dev,
                "Opcode 0x%04x unsupported by driver", opcode);
            continue;
        }

        set_bit(cmd->info.id, cxlds->enabled_cmds);
    }
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
    struct cxl_mbox_get_supported_logs *ret;
    int rc;

    ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
    if (!ret)
        return ERR_PTR(-ENOMEM);

    rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
                   cxlds->payload_size);
    if (rc < 0) {
        kvfree(ret);
        return ERR_PTR(rc);
    }

    return ret;
}

enum {
    CEL_UUID,
    VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
    [CEL_UUID] = DEFINE_CXL_CEL_UUID,
    [VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if enumeration completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlds.
 */
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
    struct cxl_mbox_get_supported_logs *gsl;
    struct device *dev = cxlds->dev;
    struct cxl_mem_command *cmd;
    int i, rc;

    gsl = cxl_get_gsl(cxlds);
    if (IS_ERR(gsl))
        return PTR_ERR(gsl);

    rc = -ENOENT;
    for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
        u32 size = le32_to_cpu(gsl->entry[i].size);
        uuid_t uuid = gsl->entry[i].uuid;
        u8 *log;

        dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

        if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
            continue;

        log = kvmalloc(size, GFP_KERNEL);
        if (!log) {
            rc = -ENOMEM;
            goto out;
        }

        rc = cxl_xfer_log(cxlds, &uuid, size, log);
        if (rc) {
            kvfree(log);
            goto out;
        }

        cxl_walk_cel(cxlds, size, log);
        kvfree(log);

        /* In case CEL was bogus, enable some default commands. */
        cxl_for_each_cmd(cmd)
            if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
                set_bit(cmd->info.id, cxlds->enabled_cmds);

        /* Found the required CEL */
        rc = 0;
    }

out:
    kvfree(gsl);
    return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

/**
 * cxl_mem_get_partition_info() - Get partition info
 * @cxlds: The device data for the operation
 *
 * Retrieve the current partition info for the device specified.  The active
 * values are the current capacity in bytes.  If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
    struct cxl_mbox_get_partition_info pi;
    int rc;

    rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
                   &pi, sizeof(pi));
    if (rc)
        return rc;

    cxlds->active_volatile_bytes =
        le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
    cxlds->active_persistent_bytes =
        le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
    cxlds->next_volatile_bytes =
        le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
    cxlds->next_persistent_bytes =
        le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

    return 0;
}
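
/*
 * Worked example: the Get Partition Info capacities are reported in units of
 * CXL_CAPACITY_MULTIPLIER (assumed here to be SZ_256M, matching the spec's
 * 256MB granularity), so an active_volatile_cap of 4 yields
 * cxlds->active_volatile_bytes == 4 * SZ_256M == 1GB.
 */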

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
    /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
    struct cxl_mbox_identify id;
    int rc;

    rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
                   sizeof(id));
    if (rc < 0)
        return rc;

    cxlds->total_bytes =
        le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
    cxlds->volatile_only_bytes =
        le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
    cxlds->persistent_only_bytes =
        le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
    cxlds->partition_align_bytes =
        le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

    cxlds->lsa_size = le32_to_cpu(id.lsa_size);
    memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));

    return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

static int add_dpa_res(struct device *dev, struct resource *parent,
               struct resource *res, resource_size_t start,
               resource_size_t size, const char *type)
{
    int rc;

    res->name = type;
    res->start = start;
    res->end = start + size - 1;
    res->flags = IORESOURCE_MEM;
    if (resource_size(res) == 0) {
        dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
        return 0;
    }
    rc = request_resource(parent, res);
    if (rc) {
        dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
            res, rc);
        return rc;
    }

    dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

    return 0;
}

int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
    struct device *dev = cxlds->dev;
    int rc;

    cxlds->dpa_res =
        (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);

    if (cxlds->partition_align_bytes == 0) {
        rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
                 cxlds->volatile_only_bytes, "ram");
        if (rc)
            return rc;
        return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
                   cxlds->volatile_only_bytes,
                   cxlds->persistent_only_bytes, "pmem");
    }

    rc = cxl_mem_get_partition_info(cxlds);
    if (rc) {
        dev_err(dev, "Failed to query partition information\n");
        return rc;
    }

    rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
             cxlds->active_volatile_bytes, "ram");
    if (rc)
        return rc;
    return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
               cxlds->active_volatile_bytes,
               cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
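
/*
 * Illustrative probe-time call order (a sketch of how a host driver such as
 * cxl_pci is expected to consume this API; see that driver for the
 * authoritative sequence):
 *
 *    rc = cxl_enumerate_cmds(cxlds);
 *    if (rc)
 *        return rc;
 *    rc = cxl_dev_state_identify(cxlds);
 *    if (rc)
 *        return rc;
 *    rc = cxl_mem_create_range_info(cxlds);
 *    if (rc)
 *        return rc;
 */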

struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
    struct cxl_dev_state *cxlds;

    cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
    if (!cxlds) {
        dev_err(dev, "No memory available\n");
        return ERR_PTR(-ENOMEM);
    }

    mutex_init(&cxlds->mbox_mutex);
    cxlds->dev = dev;

    return cxlds;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);

void __init cxl_mbox_init(void)
{
    struct dentry *mbox_debugfs;

    mbox_debugfs = cxl_debugfs_create_dir("mbox");
    debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
                &cxl_raw_allow_all);
}