// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "core.h"

static DECLARE_RWSEM(cxl_memdev_rwsem);

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

static void cxl_memdev_release(struct device *dev)
{
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

    ida_free(&cxl_memdev_ida, cxlmd->id);
    kfree(cxlmd);
}

static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
                kgid_t *gid)
{
    return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}
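
/*
 * Note: combined with the "mem%d" names assigned in devm_cxl_add_memdev()
 * below, the devnode callback above surfaces each memdev's character device
 * as /dev/cxl/memN (e.g. /dev/cxl/mem0).
 */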

static ssize_t firmware_version_show(struct device *dev,
                     struct device_attribute *attr, char *buf)
{
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
    struct cxl_dev_state *cxlds = cxlmd->cxlds;

    return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
    struct cxl_dev_state *cxlds = cxlmd->cxlds;

    return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
                       struct device_attribute *attr, char *buf)
{
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
    struct cxl_dev_state *cxlds = cxlmd->cxlds;

    return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
                 char *buf)
{
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    unsigned long long len = resource_size(&cxlds->ram_res);

    return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
    __ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
                  char *buf)
{
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    unsigned long long len = resource_size(&cxlds->pmem_res);

    return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
    __ATTR(size, 0444, pmem_size_show, NULL);

static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
               char *buf)
{
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
    struct cxl_dev_state *cxlds = cxlmd->cxlds;

    return sysfs_emit(buf, "%#llx\n", cxlds->serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
                  char *buf)
{
    return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static struct attribute *cxl_memdev_attributes[] = {
    &dev_attr_serial.attr,
    &dev_attr_firmware_version.attr,
    &dev_attr_payload_max.attr,
    &dev_attr_label_storage_size.attr,
    &dev_attr_numa_node.attr,
    NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
    &dev_attr_pmem_size.attr,
    NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
    &dev_attr_ram_size.attr,
    NULL,
};

static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
                  int n)
{
    if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
        return 0;
    return a->mode;
}

static struct attribute_group cxl_memdev_attribute_group = {
    .attrs = cxl_memdev_attributes,
    .is_visible = cxl_memdev_visible,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
    .name = "ram",
    .attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
    .name = "pmem",
    .attrs = cxl_memdev_pmem_attributes,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
    &cxl_memdev_attribute_group,
    &cxl_memdev_ram_attribute_group,
    &cxl_memdev_pmem_attribute_group,
    NULL,
};
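
/*
 * Userspace view (illustrative sketch, not part of this file): the groups
 * above land under /sys/bus/cxl/devices/memN/, with "ram" and "pmem" as
 * named subdirectories (e.g. memN/pmem/size). A minimal reader, assuming a
 * device named mem0 is present:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char fw[64];
 *		FILE *f = fopen("/sys/bus/cxl/devices/mem0/firmware_version", "r");
 *
 *		if (!f || !fgets(fw, sizeof(fw), f))
 *			return 1;
 *		printf("firmware_version: %s", fw);
 *		fclose(f);
 *		return 0;
 *	}
 */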

static const struct device_type cxl_memdev_type = {
    .name = "cxl_memdev",
    .release = cxl_memdev_release,
    .devnode = cxl_memdev_devnode,
    .groups = cxl_memdev_attribute_groups,
};

bool is_cxl_memdev(struct device *dev)
{
    return dev->type == &cxl_memdev_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);

/**
 * set_exclusive_cxl_commands() - atomically disable user cxl commands
 * @cxlds: The device state to operate on
 * @cmds: bitmap of commands to mark exclusive
 *
 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 * invocations of the ioctl path and then disable future execution of
 * commands with the command ids set in @cmds.
 */
void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
    down_write(&cxl_memdev_rwsem);
    bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
          CXL_MEM_COMMAND_ID_MAX);
    up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);

/**
 * clear_exclusive_cxl_commands() - atomically enable user cxl commands
 * @cxlds: The device state to modify
 * @cmds: bitmap of commands to mark available for userspace
 */
void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
    down_write(&cxl_memdev_rwsem);
    bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
              CXL_MEM_COMMAND_ID_MAX);
    up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
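
/*
 * Usage sketch (illustrative, not part of this file): a kernel-side consumer
 * that takes ownership of the label storage area could fence the Set LSA
 * command off from the ioctl path for the duration of its claim. Assumes
 * CXL_MEM_COMMAND_ID_SET_LSA from the command enum in cxlmem.h and an
 * already-initialized cxlds:
 *
 *	static DECLARE_BITMAP(cmds, CXL_MEM_COMMAND_ID_MAX);
 *
 *	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, cmds);
 *	set_exclusive_cxl_commands(cxlds, cmds);
 *	... kernel-driven label updates happen here ...
 *	clear_exclusive_cxl_commands(cxlds, cmds);
 */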

static void cxl_memdev_shutdown(struct device *dev)
{
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

    down_write(&cxl_memdev_rwsem);
    cxlmd->cxlds = NULL;
    up_write(&cxl_memdev_rwsem);
}
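
/*
 * Clearing cxlmd->cxlds under the write lock above pairs with the read-side
 * check in cxl_memdev_ioctl(): once teardown begins, in-flight ioctls drain
 * and later ones fail with -ENXIO, even though an open file descriptor (and
 * its reference on the cxl_memdev) may outlive the underlying cxl_dev_state.
 */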

static void cxl_memdev_unregister(void *_cxlmd)
{
    struct cxl_memdev *cxlmd = _cxlmd;
    struct device *dev = &cxlmd->dev;

    cxl_memdev_shutdown(dev);
    cdev_device_del(&cxlmd->cdev, dev);
    put_device(dev);
}

static void detach_memdev(struct work_struct *work)
{
    struct cxl_memdev *cxlmd;

    cxlmd = container_of(work, typeof(*cxlmd), detach_work);
    device_release_driver(&cxlmd->dev);
    put_device(&cxlmd->dev);
}

static struct lock_class_key cxl_memdev_key;

static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
                       const struct file_operations *fops)
{
    struct cxl_memdev *cxlmd;
    struct device *dev;
    struct cdev *cdev;
    int rc;

    cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
    if (!cxlmd)
        return ERR_PTR(-ENOMEM);

    /* note: ida_alloc_range()'s @max is inclusive, so stay below CXL_MEM_MAX_DEVS */
    rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
    if (rc < 0)
        goto err;
    cxlmd->id = rc;

    dev = &cxlmd->dev;
    device_initialize(dev);
    lockdep_set_class(&dev->mutex, &cxl_memdev_key);
    dev->parent = cxlds->dev;
    dev->bus = &cxl_bus_type;
    dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
    dev->type = &cxl_memdev_type;
    device_set_pm_not_required(dev);
    INIT_WORK(&cxlmd->detach_work, detach_memdev);

    cdev = &cxlmd->cdev;
    cdev_init(cdev, fops);
    return cxlmd;

err:
    kfree(cxlmd);
    return ERR_PTR(rc);
}

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
                   unsigned long arg)
{
    switch (cmd) {
    case CXL_MEM_QUERY_COMMANDS:
        return cxl_query_cmd(cxlmd, (void __user *)arg);
    case CXL_MEM_SEND_COMMAND:
        return cxl_send_cmd(cxlmd, (void __user *)arg);
    default:
        return -ENOTTY;
    }
}

static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
                 unsigned long arg)
{
    struct cxl_memdev *cxlmd = file->private_data;
    int rc = -ENXIO;

    down_read(&cxl_memdev_rwsem);
    if (cxlmd->cxlds)
        rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
    up_read(&cxl_memdev_rwsem);

    return rc;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
    struct cxl_memdev *cxlmd =
        container_of(inode->i_cdev, typeof(*cxlmd), cdev);

    get_device(&cxlmd->dev);
    file->private_data = cxlmd;

    return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
    struct cxl_memdev *cxlmd =
        container_of(inode->i_cdev, typeof(*cxlmd), cdev);

    put_device(&cxlmd->dev);

    return 0;
}

static const struct file_operations cxl_memdev_fops = {
    .owner = THIS_MODULE,
    .unlocked_ioctl = cxl_memdev_ioctl,
    .open = cxl_memdev_open,
    .release = cxl_memdev_release_file,
    .compat_ioctl = compat_ptr_ioctl,
    .llseek = noop_llseek,
};
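
/*
 * Userspace sketch (illustrative, not part of this file): the commands routed
 * through __cxl_memdev_ioctl() are issued against /dev/cxl/memN. This assumes
 * the UAPI in <linux/cxl_mem.h>, where a query with n_commands == 0 reports
 * how many commands the kernel supports:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/cxl_mem.h>
 *
 *	int main(void)
 *	{
 *		struct cxl_mem_query_commands q = { .n_commands = 0 };
 *		int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, CXL_MEM_QUERY_COMMANDS, &q) < 0)
 *			return 1;
 *		printf("%u commands supported\n", q.n_commands);
 *		close(fd);
 *		return 0;
 *	}
 */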

struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
{
    struct cxl_memdev *cxlmd;
    struct device *dev;
    struct cdev *cdev;
    int rc;

    cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
    if (IS_ERR(cxlmd))
        return cxlmd;

    dev = &cxlmd->dev;
    rc = dev_set_name(dev, "mem%d", cxlmd->id);
    if (rc)
        goto err;

    /*
     * Activate ioctl operations, no cxl_memdev_rwsem manipulation
     * needed as this is ordered with cdev_add() publishing the device.
     */
    cxlmd->cxlds = cxlds;

    cdev = &cxlmd->cdev;
    rc = cdev_device_add(cdev, dev);
    if (rc)
        goto err;

    rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
    if (rc)
        return ERR_PTR(rc);
    return cxlmd;

err:
    /*
     * The cdev was briefly live, shutdown any ioctl operations that
     * saw that state.
     */
    cxl_memdev_shutdown(dev);
    put_device(dev);
    return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
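
/*
 * Caller sketch (illustrative, not part of this file): an endpoint driver
 * that has already populated a struct cxl_dev_state registers the memdev and
 * relies on the devm action above to unregister it when the parent device is
 * torn down. The helper name below is hypothetical:
 *
 *	static int example_register_memdev(struct cxl_dev_state *cxlds)
 *	{
 *		struct cxl_memdev *cxlmd;
 *
 *		cxlmd = devm_cxl_add_memdev(cxlds);
 *		if (IS_ERR(cxlmd))
 *			return PTR_ERR(cxlmd);
 *		return 0;
 *	}
 */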

__init int cxl_memdev_init(void)
{
    dev_t devt;
    int rc;

    rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
    if (rc)
        return rc;

    cxl_mem_major = MAJOR(devt);

    return 0;
}

void cxl_memdev_exit(void)
{
    unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}