// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure
 * to coordinate bus rescans when a bridge arrives and trigger remove
 * operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;

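/*
 * Commands that are reserved for kernel-only use while a cxl_nvdimm is
 * bound; set during probe and cleared by the devm unwind below.
 */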
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

static void clear_exclusive(void *cxlds)
{
    clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}

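/*
 * devm action: detach any associated pmem region under the bridge lock,
 * then delete the nvdimm and sever the bridge link.
 */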
static void unregister_nvdimm(void *nvdimm)
{
    struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
    struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
    struct cxl_pmem_region *cxlr_pmem;

    device_lock(&cxl_nvb->dev);
    cxlr_pmem = cxl_nvd->region;
    dev_set_drvdata(&cxl_nvd->dev, NULL);
    cxl_nvd->region = NULL;
    device_unlock(&cxl_nvb->dev);

    if (cxlr_pmem) {
        device_release_driver(&cxlr_pmem->dev);
        put_device(&cxlr_pmem->dev);
    }

    nvdimm_delete(nvdimm);
    cxl_nvd->bridge = NULL;
}

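/*
 * Bind a cxl_nvdimm to its bridge's nvdimm_bus: mark the label-area
 * commands kernel-exclusive, then register an nvdimm that advertises
 * LABELING and the get/set config commands.
 */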
static int cxl_nvdimm_probe(struct device *dev)
{
    struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
    struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
    unsigned long flags = 0, cmd_mask = 0;
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    struct cxl_nvdimm_bridge *cxl_nvb;
    struct nvdimm *nvdimm;
    int rc;

    cxl_nvb = cxl_find_nvdimm_bridge(dev);
    if (!cxl_nvb)
        return -ENXIO;

    device_lock(&cxl_nvb->dev);
    if (!cxl_nvb->nvdimm_bus) {
        rc = -ENXIO;
        goto out;
    }

    set_exclusive_cxl_commands(cxlds, exclusive_cmds);
    rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
    if (rc)
        goto out;

    set_bit(NDD_LABELING, &flags);
    set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
    set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
    set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
    nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
                   cmd_mask, 0, NULL);
    if (!nvdimm) {
        rc = -ENOMEM;
        goto out;
    }

    dev_set_drvdata(dev, nvdimm);
    cxl_nvd->bridge = cxl_nvb;
    rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
    device_unlock(&cxl_nvb->dev);
    put_device(&cxl_nvb->dev);

    return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
    .name = "cxl_nvdimm",
    .probe = cxl_nvdimm_probe,
    .id = CXL_DEVICE_NVDIMM,
};

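/* ND_CMD_GET_CONFIG_SIZE: report the LSA size and mailbox transfer limit */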
static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
                    struct nd_cmd_get_config_size *cmd,
                    unsigned int buf_len)
{
    if (sizeof(*cmd) > buf_len)
        return -EINVAL;

    *cmd = (struct nd_cmd_get_config_size) {
         .config_size = cxlds->lsa_size,
         .max_xfer = cxlds->payload_size,
    };

    return 0;
}

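/* ND_CMD_GET_CONFIG_DATA: read label data via the Get LSA mailbox command */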
static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
                    struct nd_cmd_get_config_data_hdr *cmd,
                    unsigned int buf_len)
{
    struct cxl_mbox_get_lsa get_lsa;
    int rc;

    if (sizeof(*cmd) > buf_len)
        return -EINVAL;
    if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
        return -EINVAL;

    get_lsa = (struct cxl_mbox_get_lsa) {
        .offset = cpu_to_le32(cmd->in_offset),
        .length = cpu_to_le32(cmd->in_length),
    };

    rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
                   sizeof(get_lsa), cmd->out_buf, cmd->in_length);
    cmd->status = 0;

    return rc;
}

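/* ND_CMD_SET_CONFIG_DATA: write label data via the Set LSA mailbox command */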
static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
                    struct nd_cmd_set_config_hdr *cmd,
                    unsigned int buf_len)
{
    struct cxl_mbox_set_lsa *set_lsa;
    int rc;

    if (sizeof(*cmd) > buf_len)
        return -EINVAL;

    /* 4-byte status follows the input data in the payload */
    if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len)
        return -EINVAL;

    set_lsa =
        kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
    if (!set_lsa)
        return -ENOMEM;

    *set_lsa = (struct cxl_mbox_set_lsa) {
        .offset = cpu_to_le32(cmd->in_offset),
    };
    memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);

    rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
                   struct_size(set_lsa, data, cmd->in_length),
                   NULL, 0);

    /*
     * Set "firmware" status (4 packed bytes at the end of the input
     * payload).
     */
    put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
    kvfree(set_lsa);

    return rc;
}

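/* Dispatch the ND_CMD_* commands that cxl_nvdimm_probe() advertised */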
static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
                   void *buf, unsigned int buf_len)
{
    struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
    unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
    struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
    struct cxl_dev_state *cxlds = cxlmd->cxlds;

    if (!test_bit(cmd, &cmd_mask))
        return -ENOTTY;

    switch (cmd) {
    case ND_CMD_GET_CONFIG_SIZE:
        return cxl_pmem_get_config_size(cxlds, buf, buf_len);
    case ND_CMD_GET_CONFIG_DATA:
        return cxl_pmem_get_config_data(cxlds, buf, buf_len);
    case ND_CMD_SET_CONFIG_DATA:
        return cxl_pmem_set_config_data(cxlds, buf, buf_len);
    default:
        return -ENOTTY;
    }
}

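/*
 * nvdimm_bus_descriptor ->ndctl() entry point; bus-scoped commands
 * (no nvdimm target) are not supported.
 */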
static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
            struct nvdimm *nvdimm, unsigned int cmd, void *buf,
            unsigned int buf_len, int *cmd_rc)
{
    /*
     * No firmware response to translate, let the transport error
     * code take precedence.
     */
    *cmd_rc = 0;

    if (!nvdimm)
        return -ENOTTY;
    return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}

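/* Register the bridge's nvdimm_bus if it is not already online */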
static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
    if (cxl_nvb->nvdimm_bus)
        return true;
    cxl_nvb->nvdimm_bus =
        nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
    return cxl_nvb->nvdimm_bus != NULL;
}

static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
    struct cxl_nvdimm *cxl_nvd;

    if (!is_cxl_nvdimm(dev))
        return 0;

    cxl_nvd = to_cxl_nvdimm(dev);
    if (cxl_nvd->bridge != cxl_nvb)
        return 0;

    device_release_driver(dev);
    return 0;
}

static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
{
    struct cxl_pmem_region *cxlr_pmem;

    if (!is_cxl_pmem_region(dev))
        return 0;

    cxlr_pmem = to_cxl_pmem_region(dev);
    if (cxlr_pmem->bridge != cxl_nvb)
        return 0;

    device_release_driver(dev);
    return 0;
}

static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
                   struct nvdimm_bus *nvdimm_bus)
{
    if (!nvdimm_bus)
        return;

    /*
     * Set the state of cxl_nvdimm devices to unbound / idle before
     * nvdimm_bus_unregister() rips the nvdimm objects out from
     * underneath them.
     */
    bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
             cxl_pmem_region_release_driver);
    bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
             cxl_nvdimm_release_driver);
    nvdimm_bus_unregister(nvdimm_bus);
}

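/*
 * Workqueue handler that realizes a bridge state change: bring the
 * nvdimm_bus online and rescan for cxl_nvdimm devices, or tear the bus
 * down after unbinding its regions and nvdimms.
 */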
static void cxl_nvb_update_state(struct work_struct *work)
{
    struct cxl_nvdimm_bridge *cxl_nvb =
        container_of(work, typeof(*cxl_nvb), state_work);
    struct nvdimm_bus *victim_bus = NULL;
    bool release = false, rescan = false;

    device_lock(&cxl_nvb->dev);
    switch (cxl_nvb->state) {
    case CXL_NVB_ONLINE:
        if (!online_nvdimm_bus(cxl_nvb)) {
            dev_err(&cxl_nvb->dev,
                "failed to establish nvdimm bus\n");
            release = true;
        } else
            rescan = true;
        break;
    case CXL_NVB_OFFLINE:
    case CXL_NVB_DEAD:
        victim_bus = cxl_nvb->nvdimm_bus;
        cxl_nvb->nvdimm_bus = NULL;
        break;
    default:
        break;
    }
    device_unlock(&cxl_nvb->dev);

    if (release)
        device_release_driver(&cxl_nvb->dev);
    if (rescan) {
        int rc = bus_rescan_devices(&cxl_bus_type);

        dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
    }
    offline_nvdimm_bus(cxl_nvb, victim_bus);

    put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
    /*
     * Take a reference for the work handler to drop; if the work was
     * already queued, drop it here instead.
     */
    get_device(&cxl_nvb->dev);
    if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
        put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_remove(struct device *dev)
{
    struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

    if (cxl_nvb->state == CXL_NVB_ONLINE)
        cxl_nvb->state = CXL_NVB_OFFLINE;
    cxl_nvdimm_bridge_state_work(cxl_nvb);
}

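/*
 * First probe initializes the bus descriptor and work item; every probe
 * drives the bridge to CXL_NVB_ONLINE via the state work.
 */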
static int cxl_nvdimm_bridge_probe(struct device *dev)
{
    struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

    if (cxl_nvb->state == CXL_NVB_DEAD)
        return -ENXIO;

    if (cxl_nvb->state == CXL_NVB_NEW) {
        cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
            .provider_name = "CXL",
            .module = THIS_MODULE,
            .ndctl = cxl_pmem_ctl,
        };

        INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
    }

    cxl_nvb->state = CXL_NVB_ONLINE;
    cxl_nvdimm_bridge_state_work(cxl_nvb);

    return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
    .name = "cxl_nvdimm_bridge",
    .probe = cxl_nvdimm_bridge_probe,
    .remove = cxl_nvdimm_bridge_remove,
    .id = CXL_DEVICE_NVDIMM_BRIDGE,
};

static int match_cxl_nvdimm(struct device *dev, void *data)
{
    return is_cxl_nvdimm(dev);
}

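/*
 * devm action: drop the region references held by each member
 * cxl_nvdimm under the bridge lock, then delete the nd_region.
 */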
static void unregister_nvdimm_region(void *nd_region)
{
    struct cxl_nvdimm_bridge *cxl_nvb;
    struct cxl_pmem_region *cxlr_pmem;
    int i;

    cxlr_pmem = nd_region_provider_data(nd_region);
    cxl_nvb = cxlr_pmem->bridge;
    device_lock(&cxl_nvb->dev);
    for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
        struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
        struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;

        if (cxl_nvd->region) {
            put_device(&cxlr_pmem->dev);
            cxl_nvd->region = NULL;
        }
    }
    device_unlock(&cxl_nvb->dev);

    nvdimm_region_delete(nd_region);
}

static void cxlr_pmem_remove_resource(void *res)
{
    remove_resource(res);
}

struct cxl_pmem_region_info {
    u64 offset;
    u64 serial;
};

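/*
 * Assemble an nd_region from a cxl_pmem_region: claim the host physical
 * address range in the iomem resource tree, build one nd_mapping per
 * endpoint memdev, and derive the interleave-set cookie from each
 * member's offset and serial number.
 */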
static int cxl_pmem_region_probe(struct device *dev)
{
    struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
    struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
    struct cxl_region *cxlr = cxlr_pmem->cxlr;
    struct cxl_pmem_region_info *info = NULL;
    struct cxl_nvdimm_bridge *cxl_nvb;
    struct nd_interleave_set *nd_set;
    struct nd_region_desc ndr_desc;
    struct cxl_nvdimm *cxl_nvd;
    struct nvdimm *nvdimm;
    struct resource *res;
    int rc, i = 0;

    cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
    if (!cxl_nvb) {
        dev_dbg(dev, "bridge not found\n");
        return -ENXIO;
    }
    cxlr_pmem->bridge = cxl_nvb;

    device_lock(&cxl_nvb->dev);
    if (!cxl_nvb->nvdimm_bus) {
        dev_dbg(dev, "nvdimm bus not found\n");
        rc = -ENXIO;
        goto err;
    }

    memset(&mappings, 0, sizeof(mappings));
    memset(&ndr_desc, 0, sizeof(ndr_desc));

    res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
    if (!res) {
        rc = -ENOMEM;
        goto err;
    }

    res->name = "Persistent Memory";
    res->start = cxlr_pmem->hpa_range.start;
    res->end = cxlr_pmem->hpa_range.end;
    res->flags = IORESOURCE_MEM;
    res->desc = IORES_DESC_PERSISTENT_MEMORY;

    rc = insert_resource(&iomem_resource, res);
    if (rc)
        goto err;

    rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
    if (rc)
        goto err;

    ndr_desc.res = res;
    ndr_desc.provider_data = cxlr_pmem;

    ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
    ndr_desc.target_node = phys_to_target_node(res->start);
    if (ndr_desc.target_node == NUMA_NO_NODE) {
        ndr_desc.target_node = ndr_desc.numa_node;
        dev_dbg(&cxlr->dev, "changing target node from %d to %d",
            NUMA_NO_NODE, ndr_desc.target_node);
    }

    nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
    if (!nd_set) {
        rc = -ENOMEM;
        goto err;
    }

    ndr_desc.memregion = cxlr->id;
    set_bit(ND_REGION_CXL, &ndr_desc.flags);
    set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

    info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
    if (!info) {
        rc = -ENOMEM;
        goto err;
    }

    for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
        struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
        struct cxl_memdev *cxlmd = m->cxlmd;
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *d;

        d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
        if (!d) {
            dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
                dev_name(&cxlmd->dev));
            rc = -ENODEV;
            goto err;
        }

        /* safe to drop ref now with bridge lock held */
        put_device(d);

        cxl_nvd = to_cxl_nvdimm(d);
        nvdimm = dev_get_drvdata(&cxl_nvd->dev);
        if (!nvdimm) {
            dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
                dev_name(&cxlmd->dev));
            rc = -ENODEV;
            goto err;
        }
        cxl_nvd->region = cxlr_pmem;
        get_device(&cxlr_pmem->dev);
        m->cxl_nvd = cxl_nvd;
        mappings[i] = (struct nd_mapping_desc) {
            .nvdimm = nvdimm,
            .start = m->start,
            .size = m->size,
            .position = i,
        };
        info[i].offset = m->start;
        info[i].serial = cxlds->serial;
    }
    ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
    ndr_desc.mapping = mappings;

    /*
     * TODO enable CXL labels which skip the need for 'interleave-set
     * cookie'
     */
    nd_set->cookie1 =
        nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
    nd_set->cookie2 = nd_set->cookie1;
    ndr_desc.nd_set = nd_set;

    cxlr_pmem->nd_region =
        nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
    if (!cxlr_pmem->nd_region) {
        rc = -ENOMEM;
        goto err;
    }

    rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
                      cxlr_pmem->nd_region);
out:
    kfree(info);
    device_unlock(&cxl_nvb->dev);
    put_device(&cxl_nvb->dev);

    return rc;

err:
    dev_dbg(dev, "failed to create nvdimm region\n");
    for (i--; i >= 0; i--) {
        nvdimm = mappings[i].nvdimm;
        cxl_nvd = nvdimm_provider_data(nvdimm);
        put_device(&cxl_nvd->region->dev);
        cxl_nvd->region = NULL;
    }
    goto out;
}

static struct cxl_driver cxl_pmem_region_driver = {
    .name = "cxl_pmem_region",
    .probe = cxl_pmem_region_probe,
    .id = CXL_DEVICE_PMEM_REGION,
};

/*
 * Return all bridges to the CXL_NVB_NEW state to invalidate any
 * ->state_work referring to the now destroyed cxl_pmem_wq.
 */
static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
{
    struct cxl_nvdimm_bridge *cxl_nvb;

    if (!is_cxl_nvdimm_bridge(dev))
        return 0;

    cxl_nvb = to_cxl_nvdimm_bridge(dev);
    device_lock(dev);
    cxl_nvb->state = CXL_NVB_NEW;
    device_unlock(dev);

    return 0;
}

static void destroy_cxl_pmem_wq(void)
{
    destroy_workqueue(cxl_pmem_wq);
    bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}

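/*
 * Module init: mark the shutdown-state and LSA-write commands as
 * kernel-exclusive, allocate the ordered workqueue, then register the
 * bridge, nvdimm, and pmem-region drivers.
 */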
static __init int cxl_pmem_init(void)
{
    int rc;

    set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
    set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

    cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
    if (!cxl_pmem_wq)
        return -ENXIO;

    rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
    if (rc)
        goto err_bridge;

    rc = cxl_driver_register(&cxl_nvdimm_driver);
    if (rc)
        goto err_nvdimm;

    rc = cxl_driver_register(&cxl_pmem_region_driver);
    if (rc)
        goto err_region;

    return 0;

err_region:
    cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
    cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
    destroy_cxl_pmem_wq();
    return rc;
}

static __exit void cxl_pmem_exit(void)
{
    cxl_driver_unregister(&cxl_pmem_region_driver);
    cxl_driver_unregister(&cxl_nvdimm_driver);
    cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
    destroy_cxl_pmem_wq();
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);