// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
        "Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
        "Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

static bool force_labels;
module_param(force_labels, bool, 0444);
MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");

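/*
 * All of the above may be set at module load time, e.g. (illustrative
 * only, assuming the driver is built as nfit.ko):
 *
 *	modprobe nfit force_enable_dimms=1 default_dsm_family=0
 */
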
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
    struct list_head spas;
    struct list_head memdevs;
    struct list_head dcrs;
    struct list_head bdws;
    struct list_head idts;
    struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
    return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static const guid_t *to_nfit_bus_uuid(int family)
{
    if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
            "only secondary bus families can be translated\n"))
        return NULL;
    /*
     * The index of bus UUIDs starts immediately following the last
     * NVDIMM/leaf family.
     */
    return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
    struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

    /*
     * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
     * acpi_device.
     */
    if (!nd_desc->provider_name
            || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
        return NULL;

    return to_acpi_device(acpi_desc->dev);
}

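/*
 * DSM status word layout as consumed by the translators below: bits
 * [15:0] carry the command completion status and bits [31:16] carry
 * command-specific extended status (e.g. ARS scan-type capabilities).
 */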
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
    struct nd_cmd_clear_error *clear_err;
    struct nd_cmd_ars_status *ars_status;
    u16 flags;

    switch (cmd) {
    case ND_CMD_ARS_CAP:
        if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
            return -ENOTTY;

        /* Command failed */
        if (status & 0xffff)
            return -EIO;

        /* No supported scan types for this range */
        flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
        if ((status >> 16 & flags) == 0)
            return -ENOTTY;
        return 0;
    case ND_CMD_ARS_START:
        /* ARS is in progress */
        if ((status & 0xffff) == NFIT_ARS_START_BUSY)
            return -EBUSY;

        /* Command failed */
        if (status & 0xffff)
            return -EIO;
        return 0;
    case ND_CMD_ARS_STATUS:
        ars_status = buf;
        /* Command failed */
        if (status & 0xffff)
            return -EIO;
        /* Check extended status (Upper two bytes) */
        if (status == NFIT_ARS_STATUS_DONE)
            return 0;

        /* ARS is in progress */
        if (status == NFIT_ARS_STATUS_BUSY)
            return -EBUSY;

        /* No ARS performed for the current boot */
        if (status == NFIT_ARS_STATUS_NONE)
            return -EAGAIN;

        /*
         * ARS interrupted, either we overflowed or some other
         * agent wants the scan to stop.  If we didn't overflow
         * then just continue with the returned results.
         */
        if (status == NFIT_ARS_STATUS_INTR) {
            if (ars_status->out_length >= 40 && (ars_status->flags
                        & NFIT_ARS_F_OVERFLOW))
                return -ENOSPC;
            return 0;
        }

        /* Unknown status */
        if (status >> 16)
            return -EIO;
        return 0;
    case ND_CMD_CLEAR_ERROR:
        clear_err = buf;
        if (status & 0xffff)
            return -EIO;
        if (!clear_err->cleared)
            return -EIO;
        if (clear_err->length > clear_err->cleared)
            return clear_err->cleared;
        return 0;
    default:
        break;
    }

    /* all other non-zero status results in an error */
    if (status)
        return -EIO;
    return 0;
}

#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
        u32 status)
{
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    switch (cmd) {
    case ND_CMD_GET_CONFIG_SIZE:
        /*
         * In the _LSI, _LSR, _LSW case the locked status is
         * communicated via the read/write commands
         */
        if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
            break;

        if (status >> 16 & ND_CONFIG_LOCKED)
            return -EACCES;
        break;
    case ND_CMD_GET_CONFIG_DATA:
        if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
                && status == ACPI_LABELS_LOCKED)
            return -EACCES;
        break;
    case ND_CMD_SET_CONFIG_DATA:
        if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
                && status == ACPI_LABELS_LOCKED)
            return -EACCES;
        break;
    default:
        break;
    }

    /* all other non-zero status results in an error */
    if (status)
        return -EIO;
    return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
        u32 status)
{
    if (!nvdimm)
        return xlat_bus_status(buf, cmd, status);
    return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
    int i;
    void *dst;
    size_t size = 0;
    union acpi_object *buf = NULL;

    if (pkg->type != ACPI_TYPE_PACKAGE) {
        WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                pkg->type);
        goto err;
    }

    for (i = 0; i < pkg->package.count; i++) {
        union acpi_object *obj = &pkg->package.elements[i];

        if (obj->type == ACPI_TYPE_INTEGER)
            size += 4;
        else if (obj->type == ACPI_TYPE_BUFFER)
            size += obj->buffer.length;
        else {
            WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                    obj->type);
            goto err;
        }
    }

    buf = ACPI_ALLOCATE(sizeof(*buf) + size);
    if (!buf)
        goto err;

    dst = buf + 1;
    buf->type = ACPI_TYPE_BUFFER;
    buf->buffer.length = size;
    buf->buffer.pointer = dst;
    for (i = 0; i < pkg->package.count; i++) {
        union acpi_object *obj = &pkg->package.elements[i];

        if (obj->type == ACPI_TYPE_INTEGER) {
            memcpy(dst, &obj->integer.value, 4);
            dst += 4;
        } else if (obj->type == ACPI_TYPE_BUFFER) {
            memcpy(dst, obj->buffer.pointer, obj->buffer.length);
            dst += obj->buffer.length;
        }
    }
err:
    ACPI_FREE(pkg);
    return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
    union acpi_object *buf = NULL;
    void *dst = NULL;

    if (integer->type != ACPI_TYPE_INTEGER) {
        WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
                integer->type);
        goto err;
    }

    buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
    if (!buf)
        goto err;

    dst = buf + 1;
    buf->type = ACPI_TYPE_BUFFER;
    buf->buffer.length = 4;
    buf->buffer.pointer = dst;
    memcpy(dst, &integer->integer.value, 4);
err:
    ACPI_FREE(integer);
    return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
        u32 len, void *data)
{
    acpi_status rc;
    struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
    struct acpi_object_list input = {
        .count = 3,
        .pointer = (union acpi_object []) {
            [0] = {
                .integer.type = ACPI_TYPE_INTEGER,
                .integer.value = offset,
            },
            [1] = {
                .integer.type = ACPI_TYPE_INTEGER,
                .integer.value = len,
            },
            [2] = {
                .buffer.type = ACPI_TYPE_BUFFER,
                .buffer.pointer = data,
                .buffer.length = len,
            },
        },
    };

    rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
    if (ACPI_FAILURE(rc))
        return NULL;
    return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
        u32 len)
{
    acpi_status rc;
    struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
    struct acpi_object_list input = {
        .count = 2,
        .pointer = (union acpi_object []) {
            [0] = {
                .integer.type = ACPI_TYPE_INTEGER,
                .integer.value = offset,
            },
            [1] = {
                .integer.type = ACPI_TYPE_INTEGER,
                .integer.value = len,
            },
        },
    };

    rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
    if (ACPI_FAILURE(rc))
        return NULL;
    return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
    acpi_status rc;
    struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

    rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
    if (ACPI_FAILURE(rc))
        return NULL;
    return pkg_to_buf(buf.pointer);
}

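/*
 * Note: revid_table defaults to DSM revision 1; only the Intel family
 * functions GET_MODES..FW_ACTIVATE_ARM advertise revision 2.  E.g.
 * nfit_dsm_revid(NVDIMM_FAMILY_INTEL, NVDIMM_INTEL_GET_MODES) == 2,
 * while an out-of-range family or function yields 0.
 */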
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
    static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
        [NVDIMM_FAMILY_INTEL] = {
            [NVDIMM_INTEL_GET_MODES ...
                NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
        },
    };
    u8 id;

    if (family > NVDIMM_FAMILY_MAX)
        return 0;
    if (func > NVDIMM_CMD_MAX)
        return 0;
    id = revid_table[family][func];
    if (id == 0)
        return 1; /* default */
    return id;
}

static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
            && func >= NVDIMM_INTEL_GET_SECURITY_STATE
            && func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
        return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
    return true;
}

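/*
 * For ND_CMD_CALL the ioctl payload embeds a struct nd_cmd_pkg that
 * names the target family and DSM function directly; for the legacy
 * commands the Linux command number doubles as the DSM function
 * number (Intel leaf family only).
 */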
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
        struct nd_cmd_pkg *call_pkg, int *family)
{
    if (call_pkg) {
        int i;

        if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
            return -ENOTTY;

        for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
            if (call_pkg->nd_reserved2[i])
                return -EINVAL;
        *family = call_pkg->nd_family;
        return call_pkg->nd_command;
    }

    /* In the !call_pkg case, bus commands == bus functions */
    if (!nfit_mem)
        return cmd;

    /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
    if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
        return cmd;

    /*
     * Force function number validation to fail since 0 is never
     * published as a valid function in dsm_mask.
     */
    return 0;
}

int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
    struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
    union acpi_object in_obj, in_buf, *out_obj;
    const struct nd_cmd_desc *desc = NULL;
    struct device *dev = acpi_desc->dev;
    struct nd_cmd_pkg *call_pkg = NULL;
    const char *cmd_name, *dimm_name;
    unsigned long cmd_mask, dsm_mask;
    u32 offset, fw_status = 0;
    acpi_handle handle;
    const guid_t *guid;
    int func, rc, i;
    int family = 0;

    if (cmd_rc)
        *cmd_rc = -EINVAL;

    if (cmd == ND_CMD_CALL)
        call_pkg = buf;
    func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
    if (func < 0)
        return func;

    if (nvdimm) {
        struct acpi_device *adev = nfit_mem->adev;

        if (!adev)
            return -ENOTTY;

        dimm_name = nvdimm_name(nvdimm);
        cmd_name = nvdimm_cmd_name(cmd);
        cmd_mask = nvdimm_cmd_mask(nvdimm);
        dsm_mask = nfit_mem->dsm_mask;
        desc = nd_cmd_dimm_desc(cmd);
        guid = to_nfit_uuid(nfit_mem->family);
        handle = adev->handle;
    } else {
        struct acpi_device *adev = to_acpi_dev(acpi_desc);

        cmd_name = nvdimm_bus_cmd_name(cmd);
        cmd_mask = nd_desc->cmd_mask;
        if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
            family = call_pkg->nd_family;
            if (family > NVDIMM_BUS_FAMILY_MAX ||
                !test_bit(family, &nd_desc->bus_family_mask))
                return -EINVAL;
            family = array_index_nospec(family,
                            NVDIMM_BUS_FAMILY_MAX + 1);
            dsm_mask = acpi_desc->family_dsm_mask[family];
            guid = to_nfit_bus_uuid(family);
        } else {
            dsm_mask = acpi_desc->bus_dsm_mask;
            guid = to_nfit_uuid(NFIT_DEV_BUS);
        }
        desc = nd_cmd_bus_desc(cmd);
        handle = adev->handle;
        dimm_name = "bus";
    }

    if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
        return -ENOTTY;

    /*
     * Check for a valid command.  For ND_CMD_CALL, we also have to
     * make sure that the DSM function is supported.
     */
    if (cmd == ND_CMD_CALL &&
        (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
        return -ENOTTY;
    else if (!test_bit(cmd, &cmd_mask))
        return -ENOTTY;

    in_obj.type = ACPI_TYPE_PACKAGE;
    in_obj.package.count = 1;
    in_obj.package.elements = &in_buf;
    in_buf.type = ACPI_TYPE_BUFFER;
    in_buf.buffer.pointer = buf;
    in_buf.buffer.length = 0;

    /* libnvdimm has already validated the input envelope */
    for (i = 0; i < desc->in_num; i++)
        in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
                i, buf);

    if (call_pkg) {
        /* skip over package wrapper */
        in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
        in_buf.buffer.length = call_pkg->nd_size_in;
    }

    dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
        dimm_name, cmd, family, func, in_buf.buffer.length);
    if (payload_dumpable(nvdimm, func))
        print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
                in_buf.buffer.pointer,
                min_t(u32, 256, in_buf.buffer.length), true);

    /* call the BIOS, prefer the named methods over _DSM if available */
    if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
            && test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
        out_obj = acpi_label_info(handle);
    else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
            && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
        struct nd_cmd_get_config_data_hdr *p = buf;

        out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
    } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
            && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
        struct nd_cmd_set_config_hdr *p = buf;

        out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
                p->in_buf);
    } else {
        u8 revid;

        if (nvdimm)
            revid = nfit_dsm_revid(nfit_mem->family, func);
        else
            revid = 1;
        out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
    }

    if (!out_obj) {
        dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
        return -EINVAL;
    }

    if (out_obj->type != ACPI_TYPE_BUFFER) {
        dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
                dimm_name, cmd_name, out_obj->type);
        rc = -EINVAL;
        goto out;
    }

    dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
            cmd_name, out_obj->buffer.length);
    print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
            out_obj->buffer.pointer,
            min_t(u32, 128, out_obj->buffer.length), true);

    if (call_pkg) {
        call_pkg->nd_fw_size = out_obj->buffer.length;
        memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
            out_obj->buffer.pointer,
            min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

        ACPI_FREE(out_obj);
        /*
         * Need to support FW function w/o known size in advance.
         * Caller can determine required size based upon nd_fw_size.
         * If we return an error (like elsewhere) then caller wouldn't
         * be able to rely upon data returned to make calculation.
         */
        if (cmd_rc)
            *cmd_rc = 0;
        return 0;
    }

    for (i = 0, offset = 0; i < desc->out_num; i++) {
        u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
                (u32 *) out_obj->buffer.pointer,
                out_obj->buffer.length - offset);

        if (offset + out_size > out_obj->buffer.length) {
            dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
                    dimm_name, cmd_name, i);
            break;
        }

        if (in_buf.buffer.length + offset + out_size > buf_len) {
            dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
                    dimm_name, cmd_name, i);
            rc = -ENXIO;
            goto out;
        }
        memcpy(buf + in_buf.buffer.length + offset,
                out_obj->buffer.pointer + offset, out_size);
        offset += out_size;
    }

    /*
     * Set fw_status for all the commands with a known format to be
     * later interpreted by xlat_status().
     */
    if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
                    && cmd <= ND_CMD_CLEAR_ERROR)
                || (nvdimm && cmd >= ND_CMD_SMART
                    && cmd <= ND_CMD_VENDOR)))
        fw_status = *(u32 *) out_obj->buffer.pointer;

    if (offset + in_buf.buffer.length < buf_len) {
        if (i >= 1) {
            /*
             * status valid, return the number of bytes left
             * unfilled in the output buffer
             */
            rc = buf_len - offset - in_buf.buffer.length;
            if (cmd_rc)
                *cmd_rc = xlat_status(nvdimm, buf, cmd,
                        fw_status);
        } else {
            dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                    __func__, dimm_name, cmd_name, buf_len,
                    offset);
            rc = -ENXIO;
        }
    } else {
        rc = 0;
        if (cmd_rc)
            *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
    }

 out:
    ACPI_FREE(out_obj);

    return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

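/*
 * Illustrative only (not part of the driver): a bus-level ARS
 * capability query through acpi_nfit_ctl() could look like:
 *
 *	struct nd_cmd_ars_cap cmd = {
 *		.address = spa->address,
 *		.length = spa->length,
 *	};
 *	int cmd_rc, rc;
 *
 *	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
 *			&cmd, sizeof(cmd), &cmd_rc);
 *
 * On success rc is >= 0 and cmd_rc carries the xlat_bus_status()
 * translation of the firmware status word.
 */
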
static const char *spa_type_name(u16 type)
{
    static const char *to_name[] = {
        [NFIT_SPA_VOLATILE] = "volatile",
        [NFIT_SPA_PM] = "pmem",
        [NFIT_SPA_DCR] = "dimm-control-region",
        [NFIT_SPA_BDW] = "block-data-window",
        [NFIT_SPA_VDISK] = "volatile-disk",
        [NFIT_SPA_VCD] = "volatile-cd",
        [NFIT_SPA_PDISK] = "persistent-disk",
        [NFIT_SPA_PCD] = "persistent-cd",

    };

    if (type > NFIT_SPA_PCD)
        return "unknown";

    return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
    guid_t guid;
    int i;

    import_guid(&guid, spa->range_guid);
    for (i = 0; i < NFIT_UUID_MAX; i++)
        if (guid_equal(to_nfit_uuid(i), &guid))
            return i;
    return -1;
}

static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
{
    if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID)
        return sizeof(*spa);
    return sizeof(*spa) - 8;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
        struct nfit_table_prev *prev,
        struct acpi_nfit_system_address *spa)
{
    struct device *dev = acpi_desc->dev;
    struct nfit_spa *nfit_spa;

    if (spa->header.length != sizeof_spa(spa))
        return false;

    list_for_each_entry(nfit_spa, &prev->spas, list) {
        if (memcmp(nfit_spa->spa, spa, sizeof_spa(spa)) == 0) {
            list_move_tail(&nfit_spa->list, &acpi_desc->spas);
            return true;
        }
    }

    nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof_spa(spa),
            GFP_KERNEL);
    if (!nfit_spa)
        return false;
    INIT_LIST_HEAD(&nfit_spa->list);
    memcpy(nfit_spa->spa, spa, sizeof_spa(spa));
    list_add_tail(&nfit_spa->list, &acpi_desc->spas);
    dev_dbg(dev, "spa index: %d type: %s\n",
            spa->range_index,
            spa_type_name(nfit_spa_type(spa)));
    return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
        struct nfit_table_prev *prev,
        struct acpi_nfit_memory_map *memdev)
{
    struct device *dev = acpi_desc->dev;
    struct nfit_memdev *nfit_memdev;

    if (memdev->header.length != sizeof(*memdev))
        return false;

    list_for_each_entry(nfit_memdev, &prev->memdevs, list)
        if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
            list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
            return true;
        }

    nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
            GFP_KERNEL);
    if (!nfit_memdev)
        return false;
    INIT_LIST_HEAD(&nfit_memdev->list);
    memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
    list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
    dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
            memdev->device_handle, memdev->range_index,
            memdev->region_index, memdev->flags);
    return true;
}

int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
    struct acpi_nfit_memory_map *memdev;
    struct acpi_nfit_desc *acpi_desc;
    struct nfit_mem *nfit_mem;
    u16 physical_id;

    mutex_lock(&acpi_desc_lock);
    list_for_each_entry(acpi_desc, &acpi_descs, list) {
        mutex_lock(&acpi_desc->init_mutex);
        list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
            memdev = __to_nfit_memdev(nfit_mem);
            if (memdev->device_handle == device_handle) {
                *flags = memdev->flags;
                physical_id = memdev->physical_id;
                mutex_unlock(&acpi_desc->init_mutex);
                mutex_unlock(&acpi_desc_lock);
                return physical_id;
            }
        }
        mutex_unlock(&acpi_desc->init_mutex);
    }
    mutex_unlock(&acpi_desc_lock);

    return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

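/*
 * Note: the 'physical id' returned above is, per the ACPI NFIT
 * definition of the memory device structure, the handle of the
 * corresponding SMBIOS Type 17 (Memory Device) table, with the memdev
 * flags passed back through @flags.
 */
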
/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
    if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
                window_size))
        return 0;
    if (dcr->windows)
        return sizeof(*dcr);
    return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
        struct nfit_table_prev *prev,
        struct acpi_nfit_control_region *dcr)
{
    struct device *dev = acpi_desc->dev;
    struct nfit_dcr *nfit_dcr;

    if (!sizeof_dcr(dcr))
        return false;

    list_for_each_entry(nfit_dcr, &prev->dcrs, list)
        if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
            list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
            return true;
        }

    nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
            GFP_KERNEL);
    if (!nfit_dcr)
        return false;
    INIT_LIST_HEAD(&nfit_dcr->list);
    memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
    list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
    dev_dbg(dev, "dcr index: %d windows: %d\n",
            dcr->region_index, dcr->windows);
    return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
        struct nfit_table_prev *prev,
        struct acpi_nfit_data_region *bdw)
{
    struct device *dev = acpi_desc->dev;
    struct nfit_bdw *nfit_bdw;

    if (bdw->header.length != sizeof(*bdw))
        return false;
    list_for_each_entry(nfit_bdw, &prev->bdws, list)
        if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
            list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
            return true;
        }

    nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
            GFP_KERNEL);
    if (!nfit_bdw)
        return false;
    INIT_LIST_HEAD(&nfit_bdw->list);
    memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
    list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
    dev_dbg(dev, "bdw dcr: %d windows: %d\n",
            bdw->region_index, bdw->windows);
    return true;
}

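/*
 * acpi_nfit_interleave and acpi_nfit_flush_address end in a
 * one-element flexible array (line_offset[1] / hint_address[1]), so
 * the variable-length helpers below add 'count - 1' extra elements to
 * the base structure size.
 */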
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
    if (idt->header.length < sizeof(*idt))
        return 0;
    return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
        struct nfit_table_prev *prev,
        struct acpi_nfit_interleave *idt)
{
    struct device *dev = acpi_desc->dev;
    struct nfit_idt *nfit_idt;

    if (!sizeof_idt(idt))
        return false;

    list_for_each_entry(nfit_idt, &prev->idts, list) {
        if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
            continue;

        if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
            list_move_tail(&nfit_idt->list, &acpi_desc->idts);
            return true;
        }
    }

    nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
            GFP_KERNEL);
    if (!nfit_idt)
        return false;
    INIT_LIST_HEAD(&nfit_idt->list);
    memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
    list_add_tail(&nfit_idt->list, &acpi_desc->idts);
    dev_dbg(dev, "idt index: %d num_lines: %d\n",
            idt->interleave_index, idt->line_count);
    return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
    if (flush->header.length < sizeof(*flush))
        return 0;
    return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
        struct nfit_table_prev *prev,
        struct acpi_nfit_flush_address *flush)
{
    struct device *dev = acpi_desc->dev;
    struct nfit_flush *nfit_flush;

    if (!sizeof_flush(flush))
        return false;

    list_for_each_entry(nfit_flush, &prev->flushes, list) {
        if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
            continue;

        if (memcmp(nfit_flush->flush, flush,
                    sizeof_flush(flush)) == 0) {
            list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
            return true;
        }
    }

    nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
            + sizeof_flush(flush), GFP_KERNEL);
    if (!nfit_flush)
        return false;
    INIT_LIST_HEAD(&nfit_flush->list);
    memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
    list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
    dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
            flush->device_handle, flush->hint_count);
    return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
        struct acpi_nfit_capabilities *pcap)
{
    struct device *dev = acpi_desc->dev;
    u32 mask;

    mask = (1 << (pcap->highest_capability + 1)) - 1;
    acpi_desc->platform_cap = pcap->capabilities & mask;
    dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
    return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
        struct nfit_table_prev *prev, void *table, const void *end)
{
    struct device *dev = acpi_desc->dev;
    struct acpi_nfit_header *hdr;
    void *err = ERR_PTR(-ENOMEM);

    if (table >= end)
        return NULL;

    hdr = table;
    if (!hdr->length) {
        dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
            hdr->type);
        return NULL;
    }

    switch (hdr->type) {
    case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
        if (!add_spa(acpi_desc, prev, table))
            return err;
        break;
    case ACPI_NFIT_TYPE_MEMORY_MAP:
        if (!add_memdev(acpi_desc, prev, table))
            return err;
        break;
    case ACPI_NFIT_TYPE_CONTROL_REGION:
        if (!add_dcr(acpi_desc, prev, table))
            return err;
        break;
    case ACPI_NFIT_TYPE_DATA_REGION:
        if (!add_bdw(acpi_desc, prev, table))
            return err;
        break;
    case ACPI_NFIT_TYPE_INTERLEAVE:
        if (!add_idt(acpi_desc, prev, table))
            return err;
        break;
    case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
        if (!add_flush(acpi_desc, prev, table))
            return err;
        break;
    case ACPI_NFIT_TYPE_SMBIOS:
        dev_dbg(dev, "smbios\n");
        break;
    case ACPI_NFIT_TYPE_CAPABILITIES:
        if (!add_platform_cap(acpi_desc, table))
            return err;
        break;
    default:
        dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
        break;
    }

    return table + hdr->length;
}

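/*
 * add_table() is written for a simple walk of the NFIT: callers loop
 * with 'table = add_table(...)' until it returns NULL (end of the
 * tables) or an ERR_PTR() on allocation failure, e.g.:
 *
 *	while (!IS_ERR_OR_NULL(table))
 *		table = add_table(acpi_desc, &prev, table, end);
 */
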
static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
        struct acpi_nfit_system_address *spa)
{
    struct nfit_mem *nfit_mem, *found;
    struct nfit_memdev *nfit_memdev;
    int type = spa ? nfit_spa_type(spa) : 0;

    switch (type) {
    case NFIT_SPA_DCR:
    case NFIT_SPA_PM:
        break;
    default:
        if (spa)
            return 0;
    }

    /*
     * This loop runs in two modes, when a dimm is mapped the loop
     * adds memdev associations to an existing dimm, or creates a
     * dimm. In the unmapped dimm case this loop sweeps for memdev
     * instances with an invalid / zero range_index and adds those
     * dimms without spa associations.
     */
    list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
        struct nfit_flush *nfit_flush;
        struct nfit_dcr *nfit_dcr;
        u32 device_handle;
        u16 dcr;

        if (spa && nfit_memdev->memdev->range_index != spa->range_index)
            continue;
        if (!spa && nfit_memdev->memdev->range_index)
            continue;
        found = NULL;
        dcr = nfit_memdev->memdev->region_index;
        device_handle = nfit_memdev->memdev->device_handle;
        list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
            if (__to_nfit_memdev(nfit_mem)->device_handle
                    == device_handle) {
                found = nfit_mem;
                break;
            }

        if (found)
            nfit_mem = found;
        else {
            nfit_mem = devm_kzalloc(acpi_desc->dev,
                    sizeof(*nfit_mem), GFP_KERNEL);
            if (!nfit_mem)
                return -ENOMEM;
            INIT_LIST_HEAD(&nfit_mem->list);
            nfit_mem->acpi_desc = acpi_desc;
            list_add(&nfit_mem->list, &acpi_desc->dimms);
        }

        list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
            if (nfit_dcr->dcr->region_index != dcr)
                continue;
            /*
             * Record the control region for the dimm.  For
             * the ACPI 6.1 case, where there are separate
             * control regions for the pmem vs blk
             * interfaces, be sure to record the extended
             * blk details.
             */
            if (!nfit_mem->dcr)
                nfit_mem->dcr = nfit_dcr->dcr;
            else if (nfit_mem->dcr->windows == 0
                    && nfit_dcr->dcr->windows)
                nfit_mem->dcr = nfit_dcr->dcr;
            break;
        }

        list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
            struct acpi_nfit_flush_address *flush;
            u16 i;

            if (nfit_flush->flush->device_handle != device_handle)
                continue;
            nfit_mem->nfit_flush = nfit_flush;
            flush = nfit_flush->flush;
            nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
                    flush->hint_count,
                    sizeof(struct resource),
                    GFP_KERNEL);
            if (!nfit_mem->flush_wpq)
                return -ENOMEM;
            for (i = 0; i < flush->hint_count; i++) {
                struct resource *res = &nfit_mem->flush_wpq[i];

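                /* each flush hint is an 8-byte (64-bit) WPQ register */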
                res->start = flush->hint_address[i];
                res->end = res->start + 8 - 1;
            }
            break;
        }

        if (dcr && !nfit_mem->dcr) {
            dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
                    spa->range_index, dcr);
            return -ENODEV;
        }

        if (type == NFIT_SPA_DCR) {
            struct nfit_idt *nfit_idt;
            u16 idt_idx;

            /* multiple dimms may share a SPA when interleaved */
            nfit_mem->spa_dcr = spa;
            nfit_mem->memdev_dcr = nfit_memdev->memdev;
            idt_idx = nfit_memdev->memdev->interleave_index;
            list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
                if (nfit_idt->idt->interleave_index != idt_idx)
                    continue;
                nfit_mem->idt_dcr = nfit_idt->idt;
                break;
            }
        } else if (type == NFIT_SPA_PM) {
            /*
             * A single dimm may belong to multiple SPA-PM
             * ranges, record at least one in addition to
             * any SPA-DCR range.
             */
            nfit_mem->memdev_pmem = nfit_memdev->memdev;
        } else
            nfit_mem->memdev_dcr = nfit_memdev->memdev;
    }

    return 0;
}

static int nfit_mem_cmp(void *priv, const struct list_head *_a,
        const struct list_head *_b)
{
    struct nfit_mem *a = container_of(_a, typeof(*a), list);
    struct nfit_mem *b = container_of(_b, typeof(*b), list);
    u32 handleA, handleB;

    handleA = __to_nfit_memdev(a)->device_handle;
    handleB = __to_nfit_memdev(b)->device_handle;
    if (handleA < handleB)
        return -1;
    else if (handleA > handleB)
        return 1;
    return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
    struct nfit_spa *nfit_spa;
    int rc;

    /*
     * For each SPA-DCR or SPA-PMEM address range find its
     * corresponding MEMDEV(s).  From each MEMDEV find the
     * corresponding DCR.  Then, if we're operating on a SPA-DCR,
     * try to find a SPA-BDW and a corresponding BDW that references
     * the DCR.  Throw it all into an nfit_mem object.  Note, that
     * BDWs are optional.
     */
    list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
        rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
        if (rc)
            return rc;
    }

    /*
     * If a DIMM has failed to be mapped into SPA there will be no
     * SPA entries above. Find and register all the unmapped DIMMs
     * for reporting and recovery purposes.
     */
    rc = __nfit_mem_init(acpi_desc, NULL);
    if (rc)
        return rc;

    list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

    return 0;
}

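/*
 * Bus-level sysfs attributes; these surface under the 'nfit' group of
 * the bus device, e.g. /sys/bus/nd/devices/ndbusX/nfit/.
 */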
static ssize_t bus_dsm_mask_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
    struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
    struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

    return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
        __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
    struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
    struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

    return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
    struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
    struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

    return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t size)
{
    struct nvdimm_bus_descriptor *nd_desc;
    ssize_t rc;
    long val;

    rc = kstrtol(buf, 0, &val);
    if (rc)
        return rc;

    device_lock(dev);
    nd_desc = dev_get_drvdata(dev);
    if (nd_desc) {
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        switch (val) {
        case HW_ERROR_SCRUB_ON:
            acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
            break;
        case HW_ERROR_SCRUB_OFF:
            acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
            break;
        default:
            rc = -EINVAL;
            break;
        }
    }
    device_unlock(dev);
    if (rc)
        return rc;
    return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

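/*
 * Example usage from userspace (path illustrative, assuming the first
 * libnvdimm bus):
 *
 *	# echo 1 > /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub
 */
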
/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm_bus_descriptor *nd_desc;
    struct acpi_nfit_desc *acpi_desc;
    ssize_t rc = -ENXIO;
    bool busy;

    device_lock(dev);
    nd_desc = dev_get_drvdata(dev);
    if (!nd_desc) {
        device_unlock(dev);
        return rc;
    }
    acpi_desc = to_acpi_desc(nd_desc);

    mutex_lock(&acpi_desc->init_mutex);
    busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
        && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
    rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
    /* Allow an admin to poll the busy state at a higher rate */
    if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
                &acpi_desc->scrub_flags)) {
        acpi_desc->scrub_tmo = 1;
        mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
    }

    mutex_unlock(&acpi_desc->init_mutex);
    device_unlock(dev);
    return rc;
}

static ssize_t scrub_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t size)
{
    struct nvdimm_bus_descriptor *nd_desc;
    ssize_t rc;
    long val;

    rc = kstrtol(buf, 0, &val);
    if (rc)
        return rc;
    if (val != 1)
        return -EINVAL;

    device_lock(dev);
    nd_desc = dev_get_drvdata(dev);
    if (nd_desc) {
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
    }
    device_unlock(dev);
    if (rc)
        return rc;
    return size;
}
static DEVICE_ATTR_RW(scrub);

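/*
 * Example (illustrative; values depend on scrub history):
 *
 *	# cat /sys/bus/nd/devices/ndbus0/nfit/scrub
 *	1+
 *	# echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub
 *
 * The trailing '+' indicates an ARS is in flight; writing '1' requests
 * a new long-duration scrub.
 */
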
static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
    struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
    const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
        | 1 << ND_CMD_ARS_STATUS;

    return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
    struct device *dev = kobj_to_dev(kobj);
    struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

    if (a == &dev_attr_scrub.attr)
        return ars_supported(nvdimm_bus) ? a->mode : 0;

    if (a == &dev_attr_firmware_activate_noidle.attr)
        return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;

    return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
    &dev_attr_revision.attr,
    &dev_attr_scrub.attr,
    &dev_attr_hw_error_scrub.attr,
    &dev_attr_bus_dsm_mask.attr,
    &dev_attr_firmware_activate_noidle.attr,
    NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
    .name = "nfit",
    .attrs = acpi_nfit_attributes,
    .is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
    &acpi_nfit_attribute_group,
    NULL,
};

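/*
 * Per-DIMM sysfs attributes, surfaced under the 'nfit' group of each
 * nvdimm device (e.g. /sys/bus/nd/devices/nmemX/nfit/).
 */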
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

    return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

    return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    return sprintf(buf, "0x%04x\n",
            be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
    int formats = 0;

    if (nfit_mem->memdev_pmem)
        formats++;
    return formats;
}

static ssize_t format_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    u32 handle;
    ssize_t rc = -ENXIO;
    struct nfit_mem *nfit_mem;
    struct nfit_memdev *nfit_memdev;
    struct acpi_nfit_desc *acpi_desc;
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    nfit_mem = nvdimm_provider_data(nvdimm);
    acpi_desc = nfit_mem->acpi_desc;
    handle = to_nfit_memdev(dev)->device_handle;

    /* assumes DIMMs have at most 2 published interface codes */
    mutex_lock(&acpi_desc->init_mutex);
    list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
        struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
        struct nfit_dcr *nfit_dcr;

        if (memdev->device_handle != handle)
            continue;

        list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
            if (nfit_dcr->dcr->region_index != memdev->region_index)
                continue;
            if (nfit_dcr->dcr->code == dcr->code)
                continue;
            rc = sprintf(buf, "0x%04x\n",
                    le16_to_cpu(nfit_dcr->dcr->code));
            break;
        }
        if (rc != -ENXIO)
            break;
    }
    mutex_unlock(&acpi_desc->init_mutex);
    return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm *nvdimm = to_nvdimm(dev);

    return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

    return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    if (nfit_mem->family < 0)
        return -ENXIO;
    return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    if (nfit_mem->family < 0)
        return -ENXIO;
    return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
    u16 flags = __to_nfit_memdev(nfit_mem)->flags;

    if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
        flags |= ACPI_NFIT_MEM_FLUSH_FAILED;

    return sprintf(buf, "%s%s%s%s%s%s%s\n",
        flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
        flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
        flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
        flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
        flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
        flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
        flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    return sprintf(buf, "%s\n", nfit_mem->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t dirty_shutdown_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static struct attribute *acpi_nfit_dimm_attributes[] = {
    &dev_attr_handle.attr,
    &dev_attr_phys_id.attr,
    &dev_attr_vendor.attr,
    &dev_attr_device.attr,
    &dev_attr_rev_id.attr,
    &dev_attr_subsystem_vendor.attr,
    &dev_attr_subsystem_device.attr,
    &dev_attr_subsystem_rev_id.attr,
    &dev_attr_format.attr,
    &dev_attr_formats.attr,
    &dev_attr_format1.attr,
    &dev_attr_serial.attr,
    &dev_attr_flags.attr,
    &dev_attr_id.attr,
    &dev_attr_family.attr,
    &dev_attr_dsm_mask.attr,
    &dev_attr_dirty_shutdown.attr,
    NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
        struct attribute *a, int n)
{
    struct device *dev = kobj_to_dev(kobj);
    struct nvdimm *nvdimm = to_nvdimm(dev);
    struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

    if (!to_nfit_dcr(dev)) {
        /* Without a dcr only the memdev attributes can be surfaced */
        if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
                || a == &dev_attr_flags.attr
                || a == &dev_attr_family.attr
                || a == &dev_attr_dsm_mask.attr)
            return a->mode;
        return 0;
    }

    if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
        return 0;

    if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
            && a == &dev_attr_dirty_shutdown.attr)
        return 0;

    return a->mode;
}

static const struct attribute_group acpi_nfit_dimm_attribute_group = {
    .name = "nfit",
    .attrs = acpi_nfit_dimm_attributes,
    .is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
    &acpi_nfit_dimm_attribute_group,
    NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
        u32 device_handle)
{
    struct nfit_mem *nfit_mem;

    list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
        if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
            return nfit_mem->nvdimm;

    return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
    struct nfit_mem *nfit_mem;
    struct acpi_nfit_desc *acpi_desc;

    dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
            event);

    if (event != NFIT_NOTIFY_DIMM_HEALTH) {
        dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
                event);
        return;
    }

    acpi_desc = dev_get_drvdata(dev->parent);
    if (!acpi_desc)
        return;

    /*
     * If we successfully retrieved acpi_desc, then we know nfit_mem data
     * is still valid.
     */
    nfit_mem = dev_get_drvdata(dev);
    if (nfit_mem && nfit_mem->flags_attr)
        sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
    struct acpi_device *adev = data;
    struct device *dev = &adev->dev;

    device_lock(dev->parent);
    __acpi_nvdimm_notify(dev, event);
    device_unlock(dev->parent);
}

static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
{
    acpi_handle handle;
    acpi_status status;

    status = acpi_get_handle(adev->handle, method, &handle);

    if (ACPI_SUCCESS(status))
        return true;
    return false;
}

__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
{
    struct device *dev = &nfit_mem->adev->dev;
    struct nd_intel_smart smart = { 0 };
    union acpi_object in_buf = {
        .buffer.type = ACPI_TYPE_BUFFER,
        .buffer.length = 0,
    };
1725     union acpi_object in_obj = {
1726         .package.type = ACPI_TYPE_PACKAGE,
1727         .package.count = 1,
1728         .package.elements = &in_buf,
1729     };
1730     const u8 func = ND_INTEL_SMART;
1731     const guid_t *guid = to_nfit_uuid(nfit_mem->family);
1732     u8 revid = nfit_dsm_revid(nfit_mem->family, func);
1733     struct acpi_device *adev = nfit_mem->adev;
1734     acpi_handle handle = adev->handle;
1735     union acpi_object *out_obj;
1736 
1737     if ((nfit_mem->dsm_mask & (1 << func)) == 0)
1738         return;
1739 
1740     out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
1741     if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
1742             || out_obj->buffer.length < sizeof(smart)) {
1743         dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
1744                 dev_name(dev));
1745         ACPI_FREE(out_obj);
1746         return;
1747     }
1748     memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
1749     ACPI_FREE(out_obj);
1750 
1751     if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
1752         if (smart.shutdown_state)
1753             set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
1754     }
1755 
1756     if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
1757         set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
1758         nfit_mem->dirty_shutdown = smart.shutdown_count;
1759     }
1760 }
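
/*
 * Illustrative call flow for the __weak helper above (the weak linkage
 * allows a test harness, e.g. nfit_test under tools/testing/nvdimm, to
 * substitute a mock), assuming an Intel-family DIMM whose dsm_mask
 * advertises ND_INTEL_SMART:
 *
 *   nfit_intel_shutdown_status(nfit_mem)
 *     -> acpi_evaluate_dsm(handle, guid, revid, ND_INTEL_SMART, &in_obj)
 *     -> copy the returned buffer into struct nd_intel_smart
 *     -> set NFIT_MEM_DIRTY / NFIT_MEM_DIRTY_COUNT from the *_VALID flags
 */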
1761 
1762 static void populate_shutdown_status(struct nfit_mem *nfit_mem)
1763 {
1764     /*
1765      * For DIMMs that provide a dynamic facility to retrieve a
1766      * dirty-shutdown status and/or a dirty-shutdown count, cache
1767      * these values in nfit_mem.
1768      */
1769     if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
1770         nfit_intel_shutdown_status(nfit_mem);
1771 }
1772 
1773 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1774         struct nfit_mem *nfit_mem, u32 device_handle)
1775 {
1776     struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1777     struct acpi_device *adev, *adev_dimm;
1778     struct device *dev = acpi_desc->dev;
1779     unsigned long dsm_mask, label_mask;
1780     const guid_t *guid;
1781     int i;
1782     int family = -1;
1783     struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
1784 
1785     /* nfit test assumes a 1:1 relationship between commands and DSMs */
1786     nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
1787     nfit_mem->family = NVDIMM_FAMILY_INTEL;
1788     set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
1789 
1790     if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
1791         sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
1792                 be16_to_cpu(dcr->vendor_id),
1793                 dcr->manufacturing_location,
1794                 be16_to_cpu(dcr->manufacturing_date),
1795                 be32_to_cpu(dcr->serial_number));
1796     else
1797         sprintf(nfit_mem->id, "%04x-%08x",
1798                 be16_to_cpu(dcr->vendor_id),
1799                 be32_to_cpu(dcr->serial_number));
1800 
1801     adev = to_acpi_dev(acpi_desc);
1802     if (!adev) {
1803         /* unit test case */
1804         populate_shutdown_status(nfit_mem);
1805         return 0;
1806     }
1807 
1808     adev_dimm = acpi_find_child_device(adev, device_handle, false);
1809     nfit_mem->adev = adev_dimm;
1810     if (!adev_dimm) {
1811         dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1812                 device_handle);
1813         return force_enable_dimms ? 0 : -ENODEV;
1814     }
1815 
1816     if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
1817         ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
1818         dev_err(dev, "%s: notification registration failed\n",
1819                 dev_name(&adev_dimm->dev));
1820         return -ENXIO;
1821     }
1822     /*
1823      * Record nfit_mem for the notification path to track back to
1824      * the nfit sysfs attributes for this dimm device object.
1825      */
1826     dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1827 
1828     /*
1829      * There are 4 "legacy" NVDIMM command sets
1830      * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
1831      * an EFI working group was established to constrain this
1832      * proliferation. The nfit driver probes for the supported command
1833      * set by GUID. Note, if you're a platform developer looking to add
1834      * a new command set to this probe, consider using an existing set,
1835      * or otherwise seek approval to publish the command set at
1836      * http://www.uefi.org/RFIC_LIST.
1837      *
1838      * Note that checking for function 0 (bit 0) tells us whether any
1839      * commands are reachable through this GUID.
1840      */
1841     clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
1842     for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
1843         if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
1844             set_bit(i, &nd_desc->dimm_family_mask);
1845             if (family < 0 || i == default_dsm_family)
1846                 family = i;
1847         }
1848 
1849     /* limit the supported commands to those that are publicly documented */
1850     nfit_mem->family = family;
1851     if (override_dsm_mask && !disable_vendor_specific)
1852         dsm_mask = override_dsm_mask;
1853     else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1854         dsm_mask = NVDIMM_INTEL_CMDMASK;
1855         if (disable_vendor_specific)
1856             dsm_mask &= ~(1 << ND_CMD_VENDOR);
1857     } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
1858         dsm_mask = 0x1c3c76;
1859     } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
1860         dsm_mask = 0x1fe;
1861         if (disable_vendor_specific)
1862             dsm_mask &= ~(1 << 8);
1863     } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1864         dsm_mask = 0xffffffff;
1865     } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
1866         dsm_mask = 0x1f;
1867     } else {
1868         dev_dbg(dev, "unknown dimm command family\n");
1869         nfit_mem->family = -1;
1870         /* DSMs are optional; continue loading the driver... */
1871         return 0;
1872     }
1873 
1874     /*
1875      * Function 0 is the command interrogation function; don't
1876      * export it for potential userspace use. Clearing it here also
1877      * lets acpi_nfit_ctl() treat function 0 as an error value.
1878      */
1879     dsm_mask &= ~1UL;
1880 
1881     guid = to_nfit_uuid(nfit_mem->family);
1882     for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1883         if (acpi_check_dsm(adev_dimm->handle, guid,
1884                     nfit_dsm_revid(nfit_mem->family, i),
1885                     1ULL << i))
1886             set_bit(i, &nfit_mem->dsm_mask);
1887 
1888     /*
1889      * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
1890      * due to their better semantics for handling locked capacity.
1891      */
1892     label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
1893         | 1 << ND_CMD_SET_CONFIG_DATA;
1894     if (family == NVDIMM_FAMILY_INTEL
1895             && (dsm_mask & label_mask) == label_mask)
1896         /* skip _LS{I,R,W} enabling */;
1897     else {
1898         if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1899                 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1900             dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1901             set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1902         }
1903 
1904         if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
1905                 && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
1906             dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1907             set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
1908         }
1909 
1910         /*
1911          * Quirk read-only label configurations to preserve
1912          * access to label-less namespaces by default.
1913          */
1914         if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
1915                 && !force_labels) {
1916             dev_dbg(dev, "%s: No _LSW, disable labels\n",
1917                     dev_name(&adev_dimm->dev));
1918             clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1919         } else
1920             dev_dbg(dev, "%s: Force enable labels\n",
1921                     dev_name(&adev_dimm->dev));
1922     }
1923 
1924     populate_shutdown_status(nfit_mem);
1925 
1926     return 0;
1927 }
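
/*
 * Summary of the mask derivation above (illustrative, not normative):
 * family selection takes the lowest-numbered GUID whose function 0
 * responds, unless default_dsm_family points at another responding
 * family; dsm_mask starts from the per-family allow-list, drops bit 0
 * (~1UL), and keeps only the functions acpi_check_dsm() confirms. An
 * Intel DIMM with full label support would typically end up with:
 *
 *   nfit_mem->family   == NVDIMM_FAMILY_INTEL
 *   nfit_mem->dsm_mask == NVDIMM_INTEL_CMDMASK & ~1UL
 *                          & <functions the platform implements>
 */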
1928 
1929 static void shutdown_dimm_notify(void *data)
1930 {
1931     struct acpi_nfit_desc *acpi_desc = data;
1932     struct nfit_mem *nfit_mem;
1933 
1934     mutex_lock(&acpi_desc->init_mutex);
1935     /*
1936      * Clear out the nfit_mem->flags_attr and shut down dimm event
1937      * notifications.
1938      */
1939     list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1940         struct acpi_device *adev_dimm = nfit_mem->adev;
1941 
1942         if (nfit_mem->flags_attr) {
1943             sysfs_put(nfit_mem->flags_attr);
1944             nfit_mem->flags_attr = NULL;
1945         }
1946         if (adev_dimm) {
1947             acpi_remove_notify_handler(adev_dimm->handle,
1948                     ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
1949             dev_set_drvdata(&adev_dimm->dev, NULL);
1950         }
1951     }
1952     mutex_unlock(&acpi_desc->init_mutex);
1953 }
1954 
1955 static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
1956 {
1957     switch (family) {
1958     case NVDIMM_FAMILY_INTEL:
1959         return intel_security_ops;
1960     default:
1961         return NULL;
1962     }
1963 }
1964 
1965 static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
1966         struct nfit_mem *nfit_mem)
1967 {
1968     unsigned long mask;
1969     struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
1970     struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1971 
1972     if (!nd_desc->fw_ops)
1973         return NULL;
1974 
1975     if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
1976         return NULL;
1977 
1978     mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
1979     if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
1980         return NULL;
1981 
1982     return intel_fw_ops;
1983 }
1984 
1985 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1986 {
1987     struct nfit_mem *nfit_mem;
1988     int dimm_count = 0, rc;
1989     struct nvdimm *nvdimm;
1990 
1991     list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1992         struct acpi_nfit_flush_address *flush;
1993         unsigned long flags = 0, cmd_mask;
1994         struct nfit_memdev *nfit_memdev;
1995         u32 device_handle;
1996         u16 mem_flags;
1997 
1998         device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
1999         nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
2000         if (nvdimm) {
2001             dimm_count++;
2002             continue;
2003         }
2004 
2005         /* collate flags across all memdevs for this dimm */
2006         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2007             struct acpi_nfit_memory_map *dimm_memdev;
2008 
2009             dimm_memdev = __to_nfit_memdev(nfit_mem);
2010             if (dimm_memdev->device_handle
2011                     != nfit_memdev->memdev->device_handle)
2012                 continue;
2013             dimm_memdev->flags |= nfit_memdev->memdev->flags;
2014         }
2015 
2016         mem_flags = __to_nfit_memdev(nfit_mem)->flags;
2017         if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
2018             set_bit(NDD_UNARMED, &flags);
2019 
2020         rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
2021         if (rc)
2022             continue;
2023 
2024         /*
2025          * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
2026          * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
2027          * userspace interface.
2028          */
2029         cmd_mask = 1UL << ND_CMD_CALL;
2030         if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
2031             /*
2032              * These commands have a 1:1 correspondence
2033              * between DSM payload and libnvdimm ioctl
2034              * payload format.
2035              */
2036             cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
2037         }
2038 
2039         if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
2040             set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
2041             set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
2042         }
2043         if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
2044             set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
2045 
2046         flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
2047             : NULL;
2048         nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
2049                 acpi_nfit_dimm_attribute_groups,
2050                 flags, cmd_mask, flush ? flush->hint_count : 0,
2051                 nfit_mem->flush_wpq, &nfit_mem->id[0],
2052                 acpi_nfit_get_security_ops(nfit_mem->family),
2053                 acpi_nfit_get_fw_ops(nfit_mem));
2054         if (!nvdimm)
2055             return -ENOMEM;
2056 
2057         nfit_mem->nvdimm = nvdimm;
2058         dimm_count++;
2059 
2060         if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
2061             continue;
2062 
2063         dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
2064                 nvdimm_name(nvdimm),
2065           mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
2066           mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
2067           mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
2068           mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
2069           mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
2070 
2071     }
2072 
2073     rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
2074     if (rc)
2075         return rc;
2076 
2077     /*
2078      * Now that dimms are successfully registered, and async registration
2079      * is flushed, attempt to enable event notification.
2080      */
2081     list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2082         struct kernfs_node *nfit_kernfs;
2083 
2084         nvdimm = nfit_mem->nvdimm;
2085         if (!nvdimm)
2086             continue;
2087 
2088         nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
2089         if (nfit_kernfs)
2090             nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
2091                     "flags");
2092         sysfs_put(nfit_kernfs);
2093         if (!nfit_mem->flags_attr)
2094             dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
2095                     nvdimm_name(nvdimm));
2096     }
2097 
2098     return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
2099             acpi_desc);
2100 }
2101 
2102 /*
2103  * These constants are private because there are no kernel consumers of
2104  * these commands.
2105  */
2106 enum nfit_aux_cmds {
2107     NFIT_CMD_TRANSLATE_SPA = 5,
2108     NFIT_CMD_ARS_INJECT_SET = 7,
2109     NFIT_CMD_ARS_INJECT_CLEAR = 8,
2110     NFIT_CMD_ARS_INJECT_GET = 9,
2111 };
2112 
2113 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
2114 {
2115     struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2116     const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
2117     unsigned long dsm_mask, *mask;
2118     struct acpi_device *adev;
2119     int i;
2120 
2121     set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
2122     set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
2123 
2124     /* enable nfit_test to inject bus command emulation */
2125     if (acpi_desc->bus_cmd_force_en) {
2126         nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
2127         mask = &nd_desc->bus_family_mask;
2128         if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
2129             set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
2130             nd_desc->fw_ops = intel_bus_fw_ops;
2131         }
2132     }
2133 
2134     adev = to_acpi_dev(acpi_desc);
2135     if (!adev)
2136         return;
2137 
2138     for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
2139         if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2140             set_bit(i, &nd_desc->cmd_mask);
2141 
2142     dsm_mask =
2143         (1 << ND_CMD_ARS_CAP) |
2144         (1 << ND_CMD_ARS_START) |
2145         (1 << ND_CMD_ARS_STATUS) |
2146         (1 << ND_CMD_CLEAR_ERROR) |
2147         (1 << NFIT_CMD_TRANSLATE_SPA) |
2148         (1 << NFIT_CMD_ARS_INJECT_SET) |
2149         (1 << NFIT_CMD_ARS_INJECT_CLEAR) |
2150         (1 << NFIT_CMD_ARS_INJECT_GET);
2151     for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
2152         if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2153             set_bit(i, &acpi_desc->bus_dsm_mask);
2154 
2155     /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
2156     dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
2157     guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
2158     mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
2159     for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
2160         if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2161             set_bit(i, mask);
2162 
2163     if (*mask == dsm_mask) {
2164         set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
2165         nd_desc->fw_ops = intel_bus_fw_ops;
2166     }
2167 }
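
/*
 * Net effect of acpi_nfit_init_dsms(): nd_desc->cmd_mask carries the
 * kernel-consumed ARS commands, acpi_desc->bus_dsm_mask carries the
 * passthrough functions (including the private NFIT_CMD_* injection
 * helpers), and the Intel bus family / intel_bus_fw_ops are only
 * enabled when every function in NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK
 * is present.
 */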
2168 
2169 static ssize_t range_index_show(struct device *dev,
2170         struct device_attribute *attr, char *buf)
2171 {
2172     struct nd_region *nd_region = to_nd_region(dev);
2173     struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
2174 
2175     return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
2176 }
2177 static DEVICE_ATTR_RO(range_index);
2178 
2179 static struct attribute *acpi_nfit_region_attributes[] = {
2180     &dev_attr_range_index.attr,
2181     NULL,
2182 };
2183 
2184 static const struct attribute_group acpi_nfit_region_attribute_group = {
2185     .name = "nfit",
2186     .attrs = acpi_nfit_region_attributes,
2187 };
2188 
2189 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
2190     &acpi_nfit_region_attribute_group,
2191     NULL,
2192 };
2193 
2194 /* enough info to uniquely specify an interleave set */
2195 struct nfit_set_info {
2196     u64 region_offset;
2197     u32 serial_number;
2198     u32 pad;
2199 };
2200 
2201 struct nfit_set_info2 {
2202     u64 region_offset;
2203     u32 serial_number;
2204     u16 vendor_id;
2205     u16 manufacturing_date;
2206     u8 manufacturing_location;
2207     u8 reserved[31];
2208 };
2209 
2210 static int cmp_map_compat(const void *m0, const void *m1)
2211 {
2212     const struct nfit_set_info *map0 = m0;
2213     const struct nfit_set_info *map1 = m1;
2214 
2215     return memcmp(&map0->region_offset, &map1->region_offset,
2216             sizeof(u64));
2217 }
2218 
2219 static int cmp_map(const void *m0, const void *m1)
2220 {
2221     const struct nfit_set_info *map0 = m0;
2222     const struct nfit_set_info *map1 = m1;
2223 
2224     if (map0->region_offset < map1->region_offset)
2225         return -1;
2226     else if (map0->region_offset > map1->region_offset)
2227         return 1;
2228     return 0;
2229 }
2230 
2231 static int cmp_map2(const void *m0, const void *m1)
2232 {
2233     const struct nfit_set_info2 *map0 = m0;
2234     const struct nfit_set_info2 *map1 = m1;
2235 
2236     if (map0->region_offset < map1->region_offset)
2237         return -1;
2238     else if (map0->region_offset > map1->region_offset)
2239         return 1;
2240     return 0;
2241 }
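
/*
 * Why three comparators: the interleave-set cookies are fletcher64
 * sums over the mapping info sorted by region_offset, and
 * cmp_map()/cmp_map2() provide the numerically correct ordering.
 * cmp_map_compat() deliberately reproduces a historical bug where the
 * u64 offsets were compared with memcmp(), which on little-endian
 * machines orders by byte significance rather than by value; the
 * altcookie computed below keeps v1.1 namespaces created under that
 * ordering valid.
 */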
2242 
2243 /* Retrieve the nth entry referencing this spa */
2244 static struct acpi_nfit_memory_map *memdev_from_spa(
2245         struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
2246 {
2247     struct nfit_memdev *nfit_memdev;
2248 
2249     list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
2250         if (nfit_memdev->memdev->range_index == range_index)
2251             if (n-- == 0)
2252                 return nfit_memdev->memdev;
2253     return NULL;
2254 }
2255 
2256 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2257         struct nd_region_desc *ndr_desc,
2258         struct acpi_nfit_system_address *spa)
2259 {
2260     struct device *dev = acpi_desc->dev;
2261     struct nd_interleave_set *nd_set;
2262     u16 nr = ndr_desc->num_mappings;
2263     struct nfit_set_info2 *info2;
2264     struct nfit_set_info *info;
2265     int i;
2266 
2267     nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2268     if (!nd_set)
2269         return -ENOMEM;
2270     import_guid(&nd_set->type_guid, spa->range_guid);
2271 
2272     info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL);
2273     if (!info)
2274         return -ENOMEM;
2275 
2276     info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL);
2277     if (!info2)
2278         return -ENOMEM;
2279 
2280     for (i = 0; i < nr; i++) {
2281         struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
2282         struct nvdimm *nvdimm = mapping->nvdimm;
2283         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2284         struct nfit_set_info *map = &info[i];
2285         struct nfit_set_info2 *map2 = &info2[i];
2286         struct acpi_nfit_memory_map *memdev =
2287             memdev_from_spa(acpi_desc, spa->range_index, i);
2288         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2289 
2290         if (!memdev || !nfit_mem->dcr) {
2291             dev_err(dev, "%s: failed to find DCR\n", __func__);
2292             return -ENODEV;
2293         }
2294 
2295         map->region_offset = memdev->region_offset;
2296         map->serial_number = dcr->serial_number;
2297 
2298         map2->region_offset = memdev->region_offset;
2299         map2->serial_number = dcr->serial_number;
2300         map2->vendor_id = dcr->vendor_id;
2301         map2->manufacturing_date = dcr->manufacturing_date;
2302         map2->manufacturing_location = dcr->manufacturing_location;
2303     }
2304 
2305     /* v1.1 namespaces */
2306     sort(info, nr, sizeof(*info), cmp_map, NULL);
2307     nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0);
2308 
2309     /* v1.2 namespaces */
2310     sort(info2, nr, sizeof(*info2), cmp_map2, NULL);
2311     nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0);
2312 
2313     /* support v1.1 namespaces created with the wrong sort order */
2314     sort(info, nr, sizeof(*info), cmp_map_compat, NULL);
2315     nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0);
2316 
2317     /* record the result of the sort for the mapping position */
2318     for (i = 0; i < nr; i++) {
2319         struct nfit_set_info2 *map2 = &info2[i];
2320         int j;
2321 
2322         for (j = 0; j < nr; j++) {
2323             struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
2324             struct nvdimm *nvdimm = mapping->nvdimm;
2325             struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2326             struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2327 
2328             if (map2->serial_number == dcr->serial_number &&
2329                 map2->vendor_id == dcr->vendor_id &&
2330                 map2->manufacturing_date == dcr->manufacturing_date &&
2331                 map2->manufacturing_location
2332                     == dcr->manufacturing_location) {
2333                 mapping->position = i;
2334                 break;
2335             }
2336         }
2337     }
2338 
2339     ndr_desc->nd_set = nd_set;
2340     devm_kfree(dev, info);
2341     devm_kfree(dev, info2);
2342 
2343     return 0;
2344 }
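
/*
 * Minimal sketch of the cookie math above, assuming a hypothetical
 * two-way interleave (serial numbers invented for illustration):
 *
 *   struct nfit_set_info info[2] = {
 *       { .region_offset = 0x1000, .serial_number = 0xbbbb },
 *       { .region_offset = 0x0,    .serial_number = 0xaaaa },
 *   };
 *   sort(info, 2, sizeof(*info), cmp_map, NULL);
 *   cookie1 = nd_fletcher64(info, sizeof(*info) * 2, 0);
 *
 * Any change in DIMM population or interleave order changes the
 * checksum, which is how libnvdimm detects a foreign or reshuffled
 * interleave set when probing namespace labels.
 */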
2345 
2346 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
2347         struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
2348 {
2349     struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2350     struct acpi_nfit_system_address *spa = nfit_spa->spa;
2351     int cmd_rc, rc;
2352 
2353     cmd->address = spa->address;
2354     cmd->length = spa->length;
2355     rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2356             sizeof(*cmd), &cmd_rc);
2357     if (rc < 0)
2358         return rc;
2359     return cmd_rc;
2360 }
2361 
2362 static int ars_start(struct acpi_nfit_desc *acpi_desc,
2363         struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
2364 {
2365     int rc;
2366     int cmd_rc;
2367     struct nd_cmd_ars_start ars_start;
2368     struct acpi_nfit_system_address *spa = nfit_spa->spa;
2369     struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2370 
2371     memset(&ars_start, 0, sizeof(ars_start));
2372     ars_start.address = spa->address;
2373     ars_start.length = spa->length;
2374     if (req_type == ARS_REQ_SHORT)
2375         ars_start.flags = ND_ARS_RETURN_PREV_DATA;
2376     if (nfit_spa_type(spa) == NFIT_SPA_PM)
2377         ars_start.type = ND_ARS_PERSISTENT;
2378     else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2379         ars_start.type = ND_ARS_VOLATILE;
2380     else
2381         return -ENOTTY;
2382 
2383     rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2384             sizeof(ars_start), &cmd_rc);
2385 
2386     if (rc < 0)
2387         return rc;
2388     if (cmd_rc < 0)
2389         return cmd_rc;
2390     set_bit(ARS_VALID, &acpi_desc->scrub_flags);
2391     return 0;
2392 }
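
/*
 * A short request (ARS_REQ_SHORT) sets ND_ARS_RETURN_PREV_DATA, which
 * asks the platform for the error records it already holds rather than
 * a fresh media scan, so it completes quickly; a long request performs
 * a full scrub. Illustrative use:
 *
 *   rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
 *   if (rc == -EBUSY)
 *           ... another agent owns ARS, retry later ...
 */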
2393 
2394 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2395 {
2396     int rc, cmd_rc;
2397     struct nd_cmd_ars_start ars_start;
2398     struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2399     struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2400 
2401     ars_start = (struct nd_cmd_ars_start) {
2402         .address = ars_status->restart_address,
2403         .length = ars_status->restart_length,
2404         .type = ars_status->type,
2405     };
2406     rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2407             sizeof(ars_start), &cmd_rc);
2408     if (rc < 0)
2409         return rc;
2410     return cmd_rc;
2411 }
2412 
2413 static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2414 {
2415     struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2416     struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2417     int rc, cmd_rc;
2418 
2419     rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2420             acpi_desc->max_ars, &cmd_rc);
2421     if (rc < 0)
2422         return rc;
2423     return cmd_rc;
2424 }
2425 
2426 static void ars_complete(struct acpi_nfit_desc *acpi_desc,
2427         struct nfit_spa *nfit_spa)
2428 {
2429     struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2430     struct acpi_nfit_system_address *spa = nfit_spa->spa;
2431     struct nd_region *nd_region = nfit_spa->nd_region;
2432     struct device *dev;
2433 
2434     lockdep_assert_held(&acpi_desc->init_mutex);
2435     /*
2436      * Only advance the ARS state for ARS runs initiated by the
2437      * kernel; ignore ARS results from BIOS-initiated runs for scrub
2438      * completion tracking.
2439      */
2440     if (acpi_desc->scrub_spa != nfit_spa)
2441         return;
2442 
2443     if ((ars_status->address >= spa->address && ars_status->address
2444                 < spa->address + spa->length)
2445             || (ars_status->address < spa->address)) {
2446         /*
2447          * Assume that if a scrub starts at an offset from the
2448          * start of nfit_spa that we are in the continuation
2449          * case.
2450          *
2451          * Otherwise, if the scrub covers the spa range, mark
2452          * any pending request complete.
2453          */
2454         if (ars_status->address + ars_status->length
2455                 >= spa->address + spa->length)
2456                 /* complete */;
2457         else
2458             return;
2459     } else
2460         return;
2461 
2462     acpi_desc->scrub_spa = NULL;
2463     if (nd_region) {
2464         dev = nd_region_dev(nd_region);
2465         nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
2466     } else
2467         dev = acpi_desc->dev;
2468     dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
2469 }
2470 
2471 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
2472 {
2473     struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2474     struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2475     int rc;
2476     u32 i;
2477 
2478     /*
2479      * The first record starts at a 44-byte offset from the start
2480      * of the payload.
2481      */
2482     if (ars_status->out_length < 44)
2483         return 0;
2484 
2485     /*
2486      * Ignore potentially stale results that are only refreshed
2487      * after a start-ARS event.
2488      */
2489     if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
2490         dev_dbg(acpi_desc->dev, "skip %d stale records\n",
2491                 ars_status->num_records);
2492         return 0;
2493     }
2494 
2495     for (i = 0; i < ars_status->num_records; i++) {
2496         /* only process full records */
2497         if (ars_status->out_length
2498                 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2499             break;
2500         rc = nvdimm_bus_add_badrange(nvdimm_bus,
2501                 ars_status->records[i].err_address,
2502                 ars_status->records[i].length);
2503         if (rc)
2504             return rc;
2505     }
2506     if (i < ars_status->num_records)
2507         dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2508 
2509     return 0;
2510 }
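
/*
 * ARS_VALID gating: the bit is set by a successful ars_start() and
 * consumed by the test_and_clear_bit() above, so status payloads that
 * arrive without an intervening kernel-initiated start (e.g. stale
 * BIOS-run results) are dropped instead of being fed into the
 * badrange list.
 */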
2511 
2512 static void acpi_nfit_remove_resource(void *data)
2513 {
2514     struct resource *res = data;
2515 
2516     remove_resource(res);
2517 }
2518 
2519 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2520         struct nd_region_desc *ndr_desc)
2521 {
2522     struct resource *res, *nd_res = ndr_desc->res;
2523     int is_pmem, ret;
2524 
2525     /* No operation if the region is already registered as PMEM */
2526     is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2527                 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2528     if (is_pmem == REGION_INTERSECTS)
2529         return 0;
2530 
2531     res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2532     if (!res)
2533         return -ENOMEM;
2534 
2535     res->name = "Persistent Memory";
2536     res->start = nd_res->start;
2537     res->end = nd_res->end;
2538     res->flags = IORESOURCE_MEM;
2539     res->desc = IORES_DESC_PERSISTENT_MEMORY;
2540 
2541     ret = insert_resource(&iomem_resource, res);
2542     if (ret)
2543         return ret;
2544 
2545     ret = devm_add_action_or_reset(acpi_desc->dev,
2546                     acpi_nfit_remove_resource,
2547                     res);
2548     if (ret)
2549         return ret;
2550 
2551     return 0;
2552 }
2553 
2554 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2555         struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
2556         struct acpi_nfit_memory_map *memdev,
2557         struct nfit_spa *nfit_spa)
2558 {
2559     struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2560             memdev->device_handle);
2561     struct acpi_nfit_system_address *spa = nfit_spa->spa;
2562 
2563     if (!nvdimm) {
2564         dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2565                 spa->range_index, memdev->device_handle);
2566         return -ENODEV;
2567     }
2568 
2569     mapping->nvdimm = nvdimm;
2570     switch (nfit_spa_type(spa)) {
2571     case NFIT_SPA_PM:
2572     case NFIT_SPA_VOLATILE:
2573         mapping->start = memdev->address;
2574         mapping->size = memdev->region_size;
2575         break;
2576     }
2577 
2578     return 0;
2579 }
2580 
2581 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2582 {
2583     return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2584         nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2585         nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2586         nfit_spa_type(spa) == NFIT_SPA_PCD);
2587 }
2588 
2589 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
2590 {
2591     return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2592         nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2593         nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
2594 }
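
/*
 * Note the overlap between the two predicates above: NFIT_SPA_VDISK
 * and NFIT_SPA_VCD satisfy both. acpi_nfit_register_region() tests
 * nfit_spa_is_volatile() before nfit_spa_is_virtual(), so those ranges
 * become volatile regions, while NFIT_SPA_PDISK / NFIT_SPA_PCD fall
 * through to the pmem region path.
 */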
2595 
2596 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2597         struct nfit_spa *nfit_spa)
2598 {
2599     static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2600     struct acpi_nfit_system_address *spa = nfit_spa->spa;
2601     struct nd_region_desc *ndr_desc, _ndr_desc;
2602     struct nfit_memdev *nfit_memdev;
2603     struct nvdimm_bus *nvdimm_bus;
2604     struct resource res;
2605     int count = 0, rc;
2606 
2607     if (nfit_spa->nd_region)
2608         return 0;
2609 
2610     if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2611         dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
2612         return 0;
2613     }
2614 
2615     memset(&res, 0, sizeof(res));
2616     memset(&mappings, 0, sizeof(mappings));
2617     memset(&_ndr_desc, 0, sizeof(_ndr_desc));
2618     res.start = spa->address;
2619     res.end = res.start + spa->length - 1;
2620     ndr_desc = &_ndr_desc;
2621     ndr_desc->res = &res;
2622     ndr_desc->provider_data = nfit_spa;
2623     ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2624     if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
2625         ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain);
2626         ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
2627     } else {
2628         ndr_desc->numa_node = NUMA_NO_NODE;
2629         ndr_desc->target_node = NUMA_NO_NODE;
2630     }
2631 
2632     /* Fall back to address-based NUMA information if node lookup failed */
2633     if (ndr_desc->numa_node == NUMA_NO_NODE) {
2634         ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
2635         dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
2636             NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
2637     }
2638     if (ndr_desc->target_node == NUMA_NO_NODE) {
2639         ndr_desc->target_node = phys_to_target_node(spa->address);
2640         dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
2641             NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
2642     }
2643 
2644     /*
2645      * Persistence domain bits are hierarchical: if
2646      * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set, then
2647      * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
2648      */
2649     if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
2650         set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
2651     else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
2652         set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
2653 
2654     list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2655         struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
2656         struct nd_mapping_desc *mapping;
2657 
2658         /* range index 0 == unmapped in SPA or invalid-SPA */
2659         if (memdev->range_index == 0 || spa->range_index == 0)
2660             continue;
2661         if (memdev->range_index != spa->range_index)
2662             continue;
2663         if (count >= ND_MAX_MAPPINGS) {
2664             dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2665                     spa->range_index, ND_MAX_MAPPINGS);
2666             return -ENXIO;
2667         }
2668         mapping = &mappings[count++];
2669         rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
2670                 memdev, nfit_spa);
2671         if (rc)
2672             goto out;
2673     }
2674 
2675     ndr_desc->mapping = mappings;
2676     ndr_desc->num_mappings = count;
2677     rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2678     if (rc)
2679         goto out;
2680 
2681     nvdimm_bus = acpi_desc->nvdimm_bus;
2682     if (nfit_spa_type(spa) == NFIT_SPA_PM) {
2683         rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
2684         if (rc) {
2685             dev_warn(acpi_desc->dev,
2686                 "failed to insert pmem resource to iomem: %d\n",
2687                 rc);
2688             goto out;
2689         }
2690 
2691         nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2692                 ndr_desc);
2693         if (!nfit_spa->nd_region)
2694             rc = -ENOMEM;
2695     } else if (nfit_spa_is_volatile(spa)) {
2696         nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2697                 ndr_desc);
2698         if (!nfit_spa->nd_region)
2699             rc = -ENOMEM;
2700     } else if (nfit_spa_is_virtual(spa)) {
2701         nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2702                 ndr_desc);
2703         if (!nfit_spa->nd_region)
2704             rc = -ENOMEM;
2705     }
2706 
2707  out:
2708     if (rc)
2709         dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2710                 nfit_spa->spa->range_index);
2711     return rc;
2712 }
2713 
2714 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
2715 {
2716     struct device *dev = acpi_desc->dev;
2717     struct nd_cmd_ars_status *ars_status;
2718 
2719     if (acpi_desc->ars_status) {
2720         memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
2721         return 0;
2722     }
2723 
2724     ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
2725     if (!ars_status)
2726         return -ENOMEM;
2727     acpi_desc->ars_status = ars_status;
2728     return 0;
2729 }
2730 
2731 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
2732 {
2733     int rc;
2734 
2735     if (ars_status_alloc(acpi_desc))
2736         return -ENOMEM;
2737 
2738     rc = ars_get_status(acpi_desc);
2739 
2740     if (rc < 0 && rc != -ENOSPC)
2741         return rc;
2742 
2743     if (ars_status_process_records(acpi_desc))
2744         dev_err(acpi_desc->dev, "Failed to process ARS records\n");
2745 
2746     return rc;
2747 }
2748 
2749 static int ars_register(struct acpi_nfit_desc *acpi_desc,
2750         struct nfit_spa *nfit_spa)
2751 {
2752     int rc;
2753 
2754     if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
2755         return acpi_nfit_register_region(acpi_desc, nfit_spa);
2756 
2757     set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
2758     if (!no_init_ars)
2759         set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
2760 
2761     switch (acpi_nfit_query_poison(acpi_desc)) {
2762     case 0:
2763     case -ENOSPC:
2764     case -EAGAIN:
2765         rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
2766         /* shouldn't happen, try again later */
2767         if (rc == -EBUSY)
2768             break;
2769         if (rc) {
2770             set_bit(ARS_FAILED, &nfit_spa->ars_state);
2771             break;
2772         }
2773         clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
2774         rc = acpi_nfit_query_poison(acpi_desc);
2775         if (rc)
2776             break;
2777         acpi_desc->scrub_spa = nfit_spa;
2778         ars_complete(acpi_desc, nfit_spa);
2779         /*
2780          * If ars_complete() says we didn't complete the
2781          * short scrub, we'll try again with a long
2782          * request.
2783          */
2784         acpi_desc->scrub_spa = NULL;
2785         break;
2786     case -EBUSY:
2787     case -ENOMEM:
2788         /*
2789          * BIOS was using ARS, wait for it to complete (or
2790          * resources to become available) and then perform our
2791          * own scrubs.
2792          */
2793         break;
2794     default:
2795         set_bit(ARS_FAILED, &nfit_spa->ars_state);
2796         break;
2797     }
2798 
2799     return acpi_nfit_register_region(acpi_desc, nfit_spa);
2800 }
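
/*
 * Registration-time ARS flow, summarized: query any existing poison,
 * then attempt a synchronous short scrub so known errors land in the
 * badrange list before the region device probes; a long scrub, when
 * requested, is left pending for the background worker. Every branch
 * falls through to acpi_nfit_register_region(), so a busy or failed
 * ARS never blocks region registration.
 */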
2801 
2802 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
2803 {
2804     struct nfit_spa *nfit_spa;
2805 
2806     list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2807         if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
2808             continue;
2809         ars_complete(acpi_desc, nfit_spa);
2810     }
2811 }
2812 
2813 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
2814         int query_rc)
2815 {
2816     unsigned int tmo = acpi_desc->scrub_tmo;
2817     struct device *dev = acpi_desc->dev;
2818     struct nfit_spa *nfit_spa;
2819 
2820     lockdep_assert_held(&acpi_desc->init_mutex);
2821 
2822     if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
2823         return 0;
2824 
2825     if (query_rc == -EBUSY) {
2826         dev_dbg(dev, "ARS: ARS busy\n");
2827         return min(30U * 60U, tmo * 2);
2828     }
2829     if (query_rc == -ENOSPC) {
2830         dev_dbg(dev, "ARS: ARS continue\n");
2831         ars_continue(acpi_desc);
2832         return 1;
2833     }
2834     if (query_rc && query_rc != -EAGAIN) {
2835         unsigned long long addr, end;
2836 
2837         addr = acpi_desc->ars_status->address;
2838         end = addr + acpi_desc->ars_status->length;
2839         dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
2840                 query_rc);
2841     }
2842 
2843     ars_complete_all(acpi_desc);
2844     list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2845         enum nfit_ars_state req_type;
2846         int rc;
2847 
2848         if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
2849             continue;
2850 
2851         /* prefer short ARS requests first */
2852         if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
2853             req_type = ARS_REQ_SHORT;
2854         else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
2855             req_type = ARS_REQ_LONG;
2856         else
2857             continue;
2858         rc = ars_start(acpi_desc, nfit_spa, req_type);
2859 
2860         dev = nd_region_dev(nfit_spa->nd_region);
2861         dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
2862                 nfit_spa->spa->range_index,
2863                 req_type == ARS_REQ_SHORT ? "short" : "long",
2864                 rc);
2865         /*
2866          * Hmm, we raced someone else starting ARS? Try again in
2867          * a bit.
2868          */
2869         if (rc == -EBUSY)
2870             return 1;
2871         if (rc == 0) {
2872             dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
2873                     "scrub start while range %d active\n",
2874                     acpi_desc->scrub_spa->spa->range_index);
2875             clear_bit(req_type, &nfit_spa->ars_state);
2876             acpi_desc->scrub_spa = nfit_spa;
2877             /*
2878              * Consider this spa last for future scrub
2879              * requests
2880              */
2881             list_move_tail(&nfit_spa->list, &acpi_desc->spas);
2882             return 1;
2883         }
2884 
2885         dev_err(dev, "ARS: range %d ARS failed (%d)\n",
2886                 nfit_spa->spa->range_index, rc);
2887         set_bit(ARS_FAILED, &nfit_spa->ars_state);
2888     }
2889     return 0;
2890 }
2891 
2892 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
2893 {
2894     lockdep_assert_held(&acpi_desc->init_mutex);
2895 
2896     set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
2897     /* note this should only be set from within the workqueue */
2898     if (tmo)
2899         acpi_desc->scrub_tmo = tmo;
2900     queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
2901 }
2902 
2903 static void sched_ars(struct acpi_nfit_desc *acpi_desc)
2904 {
2905     __sched_ars(acpi_desc, 0);
2906 }
2907 
2908 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
2909 {
2910     lockdep_assert_held(&acpi_desc->init_mutex);
2911 
2912     clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
2913     acpi_desc->scrub_count++;
2914     if (acpi_desc->scrub_count_state)
2915         sysfs_notify_dirent(acpi_desc->scrub_count_state);
2916 }
2917 
2918 static void acpi_nfit_scrub(struct work_struct *work)
2919 {
2920     struct acpi_nfit_desc *acpi_desc;
2921     unsigned int tmo;
2922     int query_rc;
2923 
2924     acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
2925     mutex_lock(&acpi_desc->init_mutex);
2926     query_rc = acpi_nfit_query_poison(acpi_desc);
2927     tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
2928     if (tmo)
2929         __sched_ars(acpi_desc, tmo);
2930     else
2931         notify_ars_done(acpi_desc);
2932     memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
2933     clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
2934     mutex_unlock(&acpi_desc->init_mutex);
2935 }
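
/*
 * The worker above is a simple poll loop: __acpi_nfit_scrub() returns
 * 0 once no ranges remain (completion is notified) or a timeout in
 * seconds used to re-arm the delayed work, doubling while the platform
 * reports ARS busy and capped at 30 minutes.
 */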
2936 
2937 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
2938         struct nfit_spa *nfit_spa)
2939 {
2940     int type = nfit_spa_type(nfit_spa->spa);
2941     struct nd_cmd_ars_cap ars_cap;
2942     int rc;
2943 
2944     set_bit(ARS_FAILED, &nfit_spa->ars_state);
2945     memset(&ars_cap, 0, sizeof(ars_cap));
2946     rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2947     if (rc < 0)
2948         return;
2949     /* check that the supported scrub types match the spa type */
2950     if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
2951                 & ND_ARS_VOLATILE) == 0)
2952         return;
2953     if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
2954                 & ND_ARS_PERSISTENT) == 0)
2955         return;
2956 
2957     nfit_spa->max_ars = ars_cap.max_ars_out;
2958     nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2959     acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
2960     clear_bit(ARS_FAILED, &nfit_spa->ars_state);
2961 }
2962 
2963 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
2964 {
2965     struct nfit_spa *nfit_spa;
2966     int rc, do_sched_ars = 0;
2967 
2968     set_bit(ARS_VALID, &acpi_desc->scrub_flags);
2969     list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2970         switch (nfit_spa_type(nfit_spa->spa)) {
2971         case NFIT_SPA_VOLATILE:
2972         case NFIT_SPA_PM:
2973             acpi_nfit_init_ars(acpi_desc, nfit_spa);
2974             break;
2975         }
2976     }
2977 
2978     list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2979         switch (nfit_spa_type(nfit_spa->spa)) {
2980         case NFIT_SPA_VOLATILE:
2981         case NFIT_SPA_PM:
2982             /* register regions and kick off initial ARS run */
2983             rc = ars_register(acpi_desc, nfit_spa);
2984             if (rc)
2985                 return rc;
2986 
2987             /*
2988              * Kick off background ARS if at least one
2989              * region successfully registered for ARS
2990              */
2991             if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
2992                 do_sched_ars++;
2993             break;
2994         case NFIT_SPA_BDW:
2995             /* nothing to register */
2996             break;
2997         case NFIT_SPA_DCR:
2998         case NFIT_SPA_VDISK:
2999         case NFIT_SPA_VCD:
3000         case NFIT_SPA_PDISK:
3001         case NFIT_SPA_PCD:
3002             /* register known regions that don't support ARS */
3003             rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
3004             if (rc)
3005                 return rc;
3006             break;
3007         default:
3008             /* don't register unknown regions */
3009             break;
3010         }
3011     }
3012 
3013     if (do_sched_ars)
3014         sched_ars(acpi_desc);
3015     return 0;
3016 }
3017 
3018 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
3019         struct nfit_table_prev *prev)
3020 {
3021     struct device *dev = acpi_desc->dev;
3022 
3023     if (!list_empty(&prev->spas) ||
3024             !list_empty(&prev->memdevs) ||
3025             !list_empty(&prev->dcrs) ||
3026             !list_empty(&prev->bdws) ||
3027             !list_empty(&prev->idts) ||
3028             !list_empty(&prev->flushes)) {
3029         dev_err(dev, "new nfit deletes entries (unsupported)\n");
3030         return -ENXIO;
3031     }
3032     return 0;
3033 }
3034 
3035 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
3036 {
3037     struct device *dev = acpi_desc->dev;
3038     struct kernfs_node *nfit;
3039     struct device *bus_dev;
3040 
3041     if (!ars_supported(acpi_desc->nvdimm_bus))
3042         return 0;
3043 
3044     bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3045     nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
3046     if (!nfit) {
3047         dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
3048         return -ENODEV;
3049     }
3050     acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
3051     sysfs_put(nfit);
3052     if (!acpi_desc->scrub_count_state) {
3053         dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
3054         return -ENODEV;
3055     }
3056 
3057     return 0;
3058 }
3059 
3060 static void acpi_nfit_unregister(void *data)
3061 {
3062     struct acpi_nfit_desc *acpi_desc = data;
3063 
3064     nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
3065 }
3066 
3067 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
3068 {
3069     struct device *dev = acpi_desc->dev;
3070     struct nfit_table_prev prev;
3071     const void *end;
3072     int rc;
3073 
3074     if (!acpi_desc->nvdimm_bus) {
3075         acpi_nfit_init_dsms(acpi_desc);
3076 
3077         acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
3078                 &acpi_desc->nd_desc);
3079         if (!acpi_desc->nvdimm_bus)
3080             return -ENOMEM;
3081 
3082         rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
3083                 acpi_desc);
3084         if (rc)
3085             return rc;
3086 
3087         rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
3088         if (rc)
3089             return rc;
3090 
3091         /* register this acpi_desc for MCE notifications */
3092         mutex_lock(&acpi_desc_lock);
3093         list_add_tail(&acpi_desc->list, &acpi_descs);
3094         mutex_unlock(&acpi_desc_lock);
3095     }
3096 
3097     mutex_lock(&acpi_desc->init_mutex);
3098 
3099     INIT_LIST_HEAD(&prev.spas);
3100     INIT_LIST_HEAD(&prev.memdevs);
3101     INIT_LIST_HEAD(&prev.dcrs);
3102     INIT_LIST_HEAD(&prev.bdws);
3103     INIT_LIST_HEAD(&prev.idts);
3104     INIT_LIST_HEAD(&prev.flushes);
3105 
3106     list_cut_position(&prev.spas, &acpi_desc->spas,
3107                 acpi_desc->spas.prev);
3108     list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
3109                 acpi_desc->memdevs.prev);
3110     list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
3111                 acpi_desc->dcrs.prev);
3112     list_cut_position(&prev.bdws, &acpi_desc->bdws,
3113                 acpi_desc->bdws.prev);
3114     list_cut_position(&prev.idts, &acpi_desc->idts,
3115                 acpi_desc->idts.prev);
3116     list_cut_position(&prev.flushes, &acpi_desc->flushes,
3117                 acpi_desc->flushes.prev);
3118 
3119     end = data + sz;
3120     while (!IS_ERR_OR_NULL(data))
3121         data = add_table(acpi_desc, &prev, data, end);
3122 
3123     if (IS_ERR(data)) {
3124         dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
3125         rc = PTR_ERR(data);
3126         goto out_unlock;
3127     }
3128 
3129     rc = acpi_nfit_check_deletions(acpi_desc, &prev);
3130     if (rc)
3131         goto out_unlock;
3132 
3133     rc = nfit_mem_init(acpi_desc);
3134     if (rc)
3135         goto out_unlock;
3136 
3137     rc = acpi_nfit_register_dimms(acpi_desc);
3138     if (rc)
3139         goto out_unlock;
3140 
3141     rc = acpi_nfit_register_regions(acpi_desc);
3142 
3143  out_unlock:
3144     mutex_unlock(&acpi_desc->init_mutex);
3145     return rc;
3146 }
3147 EXPORT_SYMBOL_GPL(acpi_nfit_init);
3148 
3149 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3150 {
3151     struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3152     struct device *dev = acpi_desc->dev;
3153 
3154     /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
3155     device_lock(dev);
3156     device_unlock(dev);
3157 
3158     /* Bounce the init_mutex to complete initial registration */
3159     mutex_lock(&acpi_desc->init_mutex);
3160     mutex_unlock(&acpi_desc->init_mutex);
3161 
3162     return 0;
3163 }
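
/*
 * The lock "bounce" idiom above acquires and immediately releases each
 * lock purely for ordering: once both the device lock and init_mutex
 * have been taken, any in-flight acpi_nfit_add()/acpi_nfit_notify()
 * and the initial table parse must have finished, so callers of
 * flush_probe observe fully-registered state.
 */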
3164 
3165 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3166         struct nvdimm *nvdimm, unsigned int cmd)
3167 {
3168     struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3169 
3170     if (nvdimm)
3171         return 0;
3172     if (cmd != ND_CMD_ARS_START)
3173         return 0;
3174 
3175     /*
3176      * The kernel and userspace may race to initiate a scrub, but
3177      * the scrub thread is prepared to lose that initial race.  It
3178      * just needs a guarantee that any ARS it initiates is not
3179      * interrupted by an intervening start request from userspace.
3180      */
3181     if (work_busy(&acpi_desc->dwork.work))
3182         return -EBUSY;
3183 
3184     return 0;
3185 }
3186 
3187 /*
3188  * Prevent security and firmware activate commands, as well as bus
3189  * commands from foreign bus families, from being issued via ioctl.
3190  */
3191 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3192         struct nvdimm *nvdimm, unsigned int cmd, void *buf)
3193 {
3194     struct nd_cmd_pkg *call_pkg = buf;
3195     unsigned int func;
3196 
3197     if (nvdimm && cmd == ND_CMD_CALL &&
3198             call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
3199         func = call_pkg->nd_command;
3200         if (func > NVDIMM_CMD_MAX ||
3201             (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
3202             return -EOPNOTSUPP;
3203     }
3204 
3205     /* block all non-nfit bus commands */
3206     if (!nvdimm && cmd == ND_CMD_CALL &&
3207             call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
3208         return -EOPNOTSUPP;
3209 
3210     return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
3211 }
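/*
 * Note the ordering in the deny-mask test above: 'func > NVDIMM_CMD_MAX'
 * must be evaluated first so that the '1 << func' shift is never
 * performed with an out-of-range shift count.
 */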
3212 
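/**
 * acpi_nfit_ars_rescan() - request an address range scrub
 * @acpi_desc: NFIT bus descriptor
 * @req_type: ARS_REQ_SHORT or ARS_REQ_LONG
 *
 * Returns 0 if a scrub was scheduled for at least one applicable range
 * (or if scrubbing is being cancelled), -EBUSY if every applicable range
 * already has a request of @req_type pending, and -ENOTTY if no PM or
 * volatile range is eligible.
 */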
3213 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
3214         enum nfit_ars_state req_type)
3215 {
3216     struct device *dev = acpi_desc->dev;
3217     int scheduled = 0, busy = 0;
3218     struct nfit_spa *nfit_spa;
3219 
3220     mutex_lock(&acpi_desc->init_mutex);
3221     if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
3222         mutex_unlock(&acpi_desc->init_mutex);
3223         return 0;
3224     }
3225 
3226     list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3227         int type = nfit_spa_type(nfit_spa->spa);
3228 
3229         if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
3230             continue;
3231         if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3232             continue;
3233 
3234         if (test_and_set_bit(req_type, &nfit_spa->ars_state))
3235             busy++;
3236         else
3237             scheduled++;
3238     }
3239     if (scheduled) {
3240         sched_ars(acpi_desc);
3241         dev_dbg(dev, "ars_scan triggered\n");
3242     }
3243     mutex_unlock(&acpi_desc->init_mutex);
3244 
3245     if (scheduled)
3246         return 0;
3247     if (busy)
3248         return -EBUSY;
3249     return -ENOTTY;
3250 }
3251 
3252 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
3253 {
3254     struct nvdimm_bus_descriptor *nd_desc;
3255 
3256     dev_set_drvdata(dev, acpi_desc);
3257     acpi_desc->dev = dev;
3258     nd_desc = &acpi_desc->nd_desc;
3259     nd_desc->provider_name = "ACPI.NFIT";
3260     nd_desc->module = THIS_MODULE;
3261     nd_desc->ndctl = acpi_nfit_ctl;
3262     nd_desc->flush_probe = acpi_nfit_flush_probe;
3263     nd_desc->clear_to_send = acpi_nfit_clear_to_send;
3264     nd_desc->attr_groups = acpi_nfit_attribute_groups;
3265 
3266     INIT_LIST_HEAD(&acpi_desc->spas);
3267     INIT_LIST_HEAD(&acpi_desc->dcrs);
3268     INIT_LIST_HEAD(&acpi_desc->bdws);
3269     INIT_LIST_HEAD(&acpi_desc->idts);
3270     INIT_LIST_HEAD(&acpi_desc->flushes);
3271     INIT_LIST_HEAD(&acpi_desc->memdevs);
3272     INIT_LIST_HEAD(&acpi_desc->dimms);
3273     INIT_LIST_HEAD(&acpi_desc->list);
3274     mutex_init(&acpi_desc->init_mutex);
3275     acpi_desc->scrub_tmo = 1;
3276     INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
3277 }
3278 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
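/*
 * acpi_nfit_desc_init() + acpi_nfit_init() is the embedding pattern for
 * any provider of an NFIT image, not just acpi_nfit_add() below (the
 * nfit_test harness follows it too).  A minimal sketch, assuming the
 * caller owns 'dev' and holds an NFIT payload at 'nfit_buf' of
 * 'nfit_size' bytes (hypothetical names):
 *
 *	struct acpi_nfit_desc *ad;
 *
 *	ad = devm_kzalloc(dev, sizeof(*ad), GFP_KERNEL);
 *	if (!ad)
 *		return -ENOMEM;
 *	acpi_nfit_desc_init(ad, dev);
 *	return acpi_nfit_init(ad, nfit_buf, nfit_size);
 */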
3279 
3280 static void acpi_nfit_put_table(void *table)
3281 {
3282     acpi_put_table(table);
3283 }
3284 
3285 void acpi_nfit_shutdown(void *data)
3286 {
3287     struct acpi_nfit_desc *acpi_desc = data;
3288     struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3289 
3290     /*
3291      * Remove this descriptor from the global list under acpi_desc_lock
3292      * so that nfit_handle_mce() does not race with teardown.
3293      */
3294     mutex_lock(&acpi_desc_lock);
3295     list_del(&acpi_desc->list);
3296     mutex_unlock(&acpi_desc_lock);
3297 
3298     mutex_lock(&acpi_desc->init_mutex);
3299     set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
3300     cancel_delayed_work_sync(&acpi_desc->dwork);
3301     mutex_unlock(&acpi_desc->init_mutex);
3302 
3303     /*
3304      * Bounce the nvdimm bus lock to make sure any in-flight
3305      * acpi_nfit_ars_rescan() submissions have had a chance to
3306      * either submit, or to observe ARS_CANCEL and bail out.
3307      */
3308     device_lock(bus_dev);
3309     device_unlock(bus_dev);
3310 
3311     flush_workqueue(nfit_wq);
3312 }
3313 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
3314 
3315 static int acpi_nfit_add(struct acpi_device *adev)
3316 {
3317     struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3318     struct acpi_nfit_desc *acpi_desc;
3319     struct device *dev = &adev->dev;
3320     struct acpi_table_header *tbl;
3321     acpi_status status = AE_OK;
3322     acpi_size sz;
3323     int rc = 0;
3324 
3325     status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
3326     if (ACPI_FAILURE(status)) {
3327         /* The NVDIMM root device allows the OS to trigger enumeration
3328          * of NVDIMMs through NFIT at boot time, and re-enumeration at
3329          * the root level via the _FIT method at runtime.
3330          * It is ok to return 0 here; an NVDIMM may be hotplugged
3331          * later, at which point _FIT is evaluated and returns data
3332          * in the format of a series of NFIT structures.
3333          */
3334         dev_dbg(dev, "failed to find NFIT at startup\n");
3335         return 0;
3336     }
3337 
3338     rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
3339     if (rc)
3340         return rc;
3341     sz = tbl->length;
3342 
3343     acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3344     if (!acpi_desc)
3345         return -ENOMEM;
3346     acpi_nfit_desc_init(acpi_desc, &adev->dev);
3347 
3348     /* Save the acpi header for exporting the revision via sysfs */
3349     acpi_desc->acpi_header = *tbl;
3350 
3351     /* Evaluate _FIT and override with that if present */
3352     status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
3353     if (ACPI_SUCCESS(status) && buf.length > 0) {
3354         union acpi_object *obj = buf.pointer;
3355 
3356         if (obj->type == ACPI_TYPE_BUFFER)
3357             rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3358                     obj->buffer.length);
3359         else
3360             dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
3361                 (int) obj->type);
3362         kfree(buf.pointer);
3363     } else {
3364         /* skip over the lead-in header table */
3365         rc = acpi_nfit_init(acpi_desc, (void *) tbl
3366                 + sizeof(struct acpi_table_nfit),
3367                 sz - sizeof(struct acpi_table_nfit));
    }
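    /*
     * In both branches acpi_nfit_init() is handed only the concatenated
     * sub-tables: per the ACPI spec, _FIT output carries no table
     * header, and the static-table path skips
     * sizeof(struct acpi_table_nfit) to match.
     */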
3368 
3369     if (rc)
3370         return rc;
3371     return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
3372 }
3373 
3374 static int acpi_nfit_remove(struct acpi_device *adev)
3375 {
3376     /* see acpi_nfit_unregister */
3377     return 0;
3378 }
3379 
3380 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
3381 {
3382     struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3383     struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3384     union acpi_object *obj;
3385     acpi_status status;
3386     int ret;
3387 
3388     if (!dev->driver) {
3389         /* dev->driver may be null if we're being removed */
3390         dev_dbg(dev, "no driver found for dev\n");
3391         return;
3392     }
3393 
3394     if (!acpi_desc) {
3395         acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3396         if (!acpi_desc)
3397             return;
3398         acpi_nfit_desc_init(acpi_desc, dev);
3399     } else {
3400         /*
3401          * Finish previous registration before considering new
3402          * regions.
3403          */
3404         flush_workqueue(nfit_wq);
3405     }
3406 
3407     /* Evaluate _FIT */
3408     status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
3409     if (ACPI_FAILURE(status)) {
3410         dev_err(dev, "failed to evaluate _FIT\n");
3411         return;
3412     }
3413 
3414     obj = buf.pointer;
3415     if (obj->type == ACPI_TYPE_BUFFER) {
3416         ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3417                 obj->buffer.length);
3418         if (ret)
3419             dev_err(dev, "failed to merge updated NFIT\n");
3420     } else {
3421         dev_err(dev, "invalid _FIT\n");
    }
3422     kfree(buf.pointer);
3423 }
3424 
3425 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
3426 {
3427     struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3428 
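    /*
     * An uncorrectable-memory-error notification either kicks off a
     * full (long) scrub when the user opted in via HW_ERROR_SCRUB_ON,
     * or a short ARS that re-queries the affected ranges for known
     * error records.
     */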
3429     if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
3430         acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
3431     else
3432         acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
3433 }
3434 
3435 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
3436 {
3437     dev_dbg(dev, "event: 0x%x\n", event);
3438 
3439     switch (event) {
3440     case NFIT_NOTIFY_UPDATE:
3441         return acpi_nfit_update_notify(dev, handle);
3442     case NFIT_NOTIFY_UC_MEMORY_ERROR:
3443         return acpi_nfit_uc_error_notify(dev, handle);
3444     default:
3445         return;
3446     }
3447 }
3448 EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
3449 
3450 static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
3451 {
3452     device_lock(&adev->dev);
3453     __acpi_nfit_notify(&adev->dev, adev->handle, event);
3454     device_unlock(&adev->dev);
3455 }
3456 
3457 static const struct acpi_device_id acpi_nfit_ids[] = {
3458     { "ACPI0012", 0 },
3459     { "", 0 },
3460 };
3461 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
3462 
3463 static struct acpi_driver acpi_nfit_driver = {
3464     .name = KBUILD_MODNAME,
3465     .ids = acpi_nfit_ids,
3466     .ops = {
3467         .add = acpi_nfit_add,
3468         .remove = acpi_nfit_remove,
3469         .notify = acpi_nfit_notify,
3470     },
3471 };
3472 
3473 static __init int nfit_init(void)
3474 {
3475     int ret;
3476 
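    /*
     * Pin the ACPICA structure sizes to the values mandated by the
     * ACPI NFIT specification; any drift breaks the table parsing
     * above and fails the build here.
     */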
3477     BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
3478     BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 64);
3479     BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
3480     BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
3481     BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
3482     BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
3483     BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
3484     BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
3485 
3486     guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
3487     guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
3488     guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
3489     guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
3490     guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
3491     guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
3492     guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
3493     guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
3494     guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
3495     guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
3496     guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
3497     guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
3498     guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
3499     guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
3500     guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);
3501 
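    /* serialize all ARS/scrub work through a single-threaded workqueue */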
3502     nfit_wq = create_singlethread_workqueue("nfit");
3503     if (!nfit_wq)
3504         return -ENOMEM;
3505 
3506     nfit_mce_register();
3507     ret = acpi_bus_register_driver(&acpi_nfit_driver);
3508     if (ret) {
3509         nfit_mce_unregister();
3510         destroy_workqueue(nfit_wq);
3511     }
3512 
3513     return ret;
3515 }
3516 
3517 static __exit void nfit_exit(void)
3518 {
3519     nfit_mce_unregister();
3520     acpi_bus_unregister_driver(&acpi_nfit_driver);
3521     destroy_workqueue(nfit_wq);
3522     WARN_ON(!list_empty(&acpi_descs));
3523 }
3524 
3525 module_init(nfit_init);
3526 module_exit(nfit_exit);
3527 MODULE_LICENSE("GPL v2");
3528 MODULE_AUTHOR("Intel Corporation");