// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve bus and dimm handle and return if this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

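/*
 * Check that label-area commands can be issued to this dimm: drvdata
 * must exist and the bus must advertise config-data support.
 */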
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
			__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0;

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

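/*
 * Read @len bytes at @offset from the dimm's config data area into
 * @buf, splitting the transfer into ND_CMD_GET_CONFIG_DATA commands
 * each bounded by the dimm's advertised max_xfer size.
 */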
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it out */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

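/*
 * Write @len bytes from @buf at @offset into the dimm's config data
 * area, again split into max_xfer-sized ND_CMD_SET_CONFIG_DATA
 * commands.
 */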
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

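/* flag the dimm as supporting namespace label metadata */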
void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
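	/* nfree is unsigned, so "nfree - 1 > nfree" only wraps when nfree == 0 */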
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

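/* __weak so that a test harness can override this definition */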
__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

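	/* 'frozen' is only visible when the dimm supports freeze */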
	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

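/* report the outcome of the last firmware activation attempt */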
static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

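	/* require at least quiesce-capable activation to expose these attributes */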
	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

static struct lock_class_key nvdimm_key;

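/*
 * Allocate, initialize, and register an nvdimm device on @nvdimm_bus.
 * Returns NULL if allocation or id assignment fails.
 */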
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);

	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &nvdimm_key);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

void nvdimm_delete(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	bool dev_put = false;

	/* We are shutting down. Make state frozen artificially. */
	nvdimm_bus_lock(dev);
	set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
	if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
		dev_put = true;
	nvdimm_bus_unlock(dev);
	cancel_delayed_work_sync(&nvdimm->dwork);
	if (dev_put)
		put_device(dev);
	nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);

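/* devm action: drop the sysfs reference held for overwrite notification */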
static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

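/*
 * Per-dimm allocation granularity: the region alignment spread evenly
 * across each of the region's dimm mappings. Requires the bus lock.
 */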
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;

		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
				      struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_start, map_end, busy = 0;
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (end > map_end) {
				nd_dbg_dpa(nd_region, ndd, res,
						"misaligned to iset\n");
				return 0;
			}
			busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
		}
	}

	if (busy < nd_mapping->size)
		return ALIGN_DOWN(nd_mapping->size - busy, align);
	return 0;
}

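/* release a dpa allocation and free the label-id string backing its name */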
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

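/* device_for_each_child() helper: count the nvdimm children of a bus */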
static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}