// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to
 * do that later using the "UBI control device".
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 4

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name or MTD device number
 *	  string
 * @ubi_num: UBI number
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int ubi_num;
	int vid_hdr_offs;
	int max_beb_per1024;
};

/* Number of elements set in the @mtd_dev_param array */
static int mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
static bool fm_debug;
#endif

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t version_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}
static CLASS_ATTR_RO(version);

static struct attribute *ubi_class_attrs[] = {
	&class_attr_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ubi_class);

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class ubi_class = {
	.name = UBI_NAME_STR,
	.owner = THIS_MODULE,
	.class_groups = ubi_class_groups,
};

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_ro_mode =
	__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming). Returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	int ret;
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		/* The volume layout changed, so write an updated fastmap */
		ret = ubi_update_fastmap(ubi);
		if (ret)
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
	}

	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification for each volume of an UBI device.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since @ubi->device_mutex is held, the volumes cannot
		 * disappear under us, so @ubi->volumes[i] may be dereferenced
		 * without taking @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each existing volume. If @nb is %NULL,
 * then all registered notifiers are called, otherwise only the @nb notifier
 * is called. Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is held and we are not going to change
	 * @ubi_devices, we do not have to take @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns the UBI device description object for UBI device
 * number @ubi_num, or %NULL if the device does not exist. It increases the
 * device reference count, which prevents the device from being removed while
 * the reference is held.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number. Returns the UBI device description object in case of
 * success and %NULL in case of failure.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches for the UBI device by its major number. If the UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI
 * device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The UBI device description object is derived from the embedded
	 * 'struct device'. The driver core keeps @dev alive while its sysfs
	 * attribute files are being read, so no extra reference needs to be
	 * taken here.
	 */
	ubi = container_of(dev, struct ubi_device, dev);

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else if (attr == &dev_ro_mode)
		ret = sprintf(buf, "%d\n", ubi->ro_mode);
	else
		ret = -EINVAL;

	return ret;
}
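
/*
 * Editorial usage note: each attribute above surfaces as a sysfs file; for
 * example, reading '/sys/class/ubi/ubi0/eraseblock_size' (illustrative path
 * for UBI device 0) goes through dev_attribute_show() and reports
 * @ubi->leb_size.
 */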

static struct attribute *ubi_dev_attrs[] = {
	&dev_eraseblock_size.attr,
	&dev_avail_eraseblocks.attr,
	&dev_total_eraseblocks.attr,
	&dev_volumes_count.attr,
	&dev_max_ec.attr,
	&dev_reserved_for_bad.attr,
	&dev_bad_peb_count.attr,
	&dev_max_vol_count.attr,
	&dev_min_io_size.attr,
	&dev_bgt_enabled.attr,
	&dev_mtd_num.attr,
	&dev_ro_mode.attr,
	NULL
};
ATTRIBUTE_GROUPS(ubi_dev);

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated and returns an error.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi)
{
	int i, err;
	dev_t dev;

	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err(ubi, "cannot register UBI character devices");
		return err;
	}

	ubi->dev.devt = dev;

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num);
	err = cdev_device_add(&ubi->cdev, &ubi->dev);
	if (err)
		goto out_unreg;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err(ubi, "cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
	cdev_device_del(&ubi->cdev, &ubi->dev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err(ubi, "cannot initialize UBI %s, error %d",
		ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers the UBI volume device objects
 * (@vol->dev), the memory allocated for the volumes is freed as well (in the
 * release function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	cdev_device_del(&ubi->cdev, &ubi->dev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_volumes_from - free volumes from a specific index.
 * @ubi: UBI device description object
 * @from: the first volume index to free
 *
 * This function frees all volumes starting at index @from, up to and
 * including the internal volumes.
 */
static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
{
	int i;

	for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		if (!ubi->volumes[i])
			continue;
		ubi_eba_replace_table(ubi->volumes[i], NULL);
		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
		kfree(ubi->volumes[i]);
		ubi->volumes[i] = NULL;
	}
}

/**
 * ubi_free_all_volumes - free all volumes.
 * @ubi: UBI device description object
 */
void ubi_free_all_volumes(struct ubi_device *ubi)
{
	ubi_free_volumes_from(ubi, 0);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	ubi_free_volumes_from(ubi, ubi->vtbl_slots);
}

/**
 * get_bad_peb_limit - compute the expected maximum number of bad PEBs.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected bad eraseblocks per 1024 eraseblocks
 *
 * This function returns the number of physical eraseblocks UBI should expect
 * to go bad on this device, either as reported by the MTD layer or derived
 * from @max_beb_per1024.
 */
static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024) {
		/*
		 * Since max_beb_per1024 has not been set by the user in either
		 * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
		 * limit if it is supported by the device.
		 */
		limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
		if (limit < 0)
			return 0;
		return limit;
	}

	/*
	 * Here we use the size of the entire flash chip and not just the MTD
	 * partition size, because the maximum number of bad eraseblocks is a
	 * per-chip figure: bad blocks are not distributed evenly across
	 * partitions, so the whole-chip limit is the only safe bound.
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}
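
/*
 * Worked example for the computation above (editorial illustration with
 * hypothetical numbers): a 256 MiB chip with 128 KiB eraseblocks has
 * device_pebs = 2048. With max_beb_per1024 = 20 the limit becomes
 * 2048 * 20 / 1024 = 40 PEBs; since 40 * 1024 / 20 = 2048 is not below
 * device_pebs, no round-up is applied and 40 PEBs are reserved.
 */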

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header, and therefore the VID
 *     header offset equals sub-page size;
 *   o data starts just after the VID header.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever.
		 * Picking the largest region would be an option, but this is
		 * not implemented.
		 */
		ubi_err(ubi, "multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Copy the flash geometry from the MTD device: UBI physical
	 * eraseblocks (PEBs) map 1:1 to MTD eraseblocks.
	 */
	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH)
		ubi->nor_flash = 1;

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure the minimal I/O unit is a power of 2. Note, there is no
	 * fundamental reason for this assumption, it is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size must be greater than or equal to the min. I/O
	 * size, and be a multiple of the min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size %d", ubi->min_io_size);
	dbg_gen("max_write_size %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err(ubi, "unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set the maximum amount of erroneous physical eraseblocks to 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, ideally we would initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We would have
	 * to loop over all physical eraseblocks and invoke mtd->block_is_bad()
	 * for each of them, so @ubi->bad_peb_count is left uninitialized so
	 * far.
	 */

	return 0;
}
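
/*
 * Worked geometry example for io_init() (editorial illustration, hypothetical
 * NAND parameters): with 128 KiB PEBs, 2048-byte pages and 512-byte sub-pages,
 * hdrs_min_io_size = 512, so ec_hdr_alsize = ALIGN(64, 512) = 512 and the
 * default vid_hdr_offset = 512. Then leb_start = ALIGN(512 + 64, 2048) = 2048
 * and leb_size = 131072 - 2048 = 129024 bytes per LEB.
 */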

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. Returns zero in case of
 * success and a negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	if (ubi->ro_mode) {
		ubi_warn(ubi, "skip auto-resize because of R/O mode");
		return 0;
	}

	/*
	 * Clear the auto-resize flag in the in-memory copy of the volume
	 * table, and 'ubi_resize_volume()' will propagate this change to the
	 * flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		vtbl_rec = ubi->vtbl[vol_id];
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err(ubi, "cannot auto-resize volume %d",
				vol_id);
	}

	if (err)
		return err;

	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
		vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns the @ubi_num
 * number to the newly created UBI device, unless @ubi_num is
 * %UBI_DEV_NUM_AUTO, in which case this function finds a vacant device number
 * and assigns it automatically. Returns the new UBI device number in case of
 * success and a negative error code in case of failure.
 *
 * Note, invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			pr_err("ubi: mtd%d is already attached to ubi%d\n",
			       mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n",
		       mtd->index);
		return -EINVAL;
	}

	/*
	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
	 * will die soon and you will lose all your data.
	 * Relax this rule if the partition we're attaching to operates in SLC
	 * mode.
	 */
	if (mtd->type == MTD_MLCNANDFLASH &&
	    !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) {
		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
		       mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			pr_err("ubi: only %d UBI devices may be created\n",
			       UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			pr_err("ubi: ubi%i already exists\n", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	device_initialize(&ubi->dev);
	ubi->dev.release = dev_release;
	ubi->dev.class = &ubi_class;
	ubi->dev.groups = ubi_dev_groups;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
		UBI_FM_MIN_POOL_SIZE);

	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
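
	/*
	 * Worked example for the pool sizing above (editorial illustration,
	 * hypothetical device): with 4096 PEBs, (4096 / 100) * 5 = 200 pool
	 * slots before clamping between UBI_FM_MIN_POOL_SIZE and
	 * UBI_FM_MAX_POOL_SIZE, and the WL pool then holds 100 entries.
	 */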

	ubi->fm_disabled = !fm_autoconvert;
	if (fm_debug)
		ubi_enable_dbg_chk_fastmap(ubi);

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "default fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif
	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);
	init_rwsem(&ubi->fm_protect);
	init_rwsem(&ubi->fm_eba_sem);

	ubi_msg(ubi, "attaching mtd%d", mtd->index);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = vzalloc(ubi->fm_size);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err(ubi, "failed to attach mtd%d, error %d",
			mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi);
	if (err)
		goto out_detach;

	err = ubi_debugfs_init_dev(ubi);
	if (err)
		goto out_uif;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err(ubi, "cannot spawn \"%s\", error %d",
			ubi->bgt_name, err);
		goto out_debugfs;
	}

	ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
		mtd->index, mtd->name, ubi->flash_size >> 20);
	ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_msg(ubi, "user volumes: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_debugfs:
	ubi_debugfs_exit_dev(ubi);
out_uif:
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	ubi_free_all_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	put_device(&ubi->dev);
	return err;
}
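
/*
 * Editorial usage note: run-time attach requests reach ubi_attach_mtd_dev()
 * through the "ubi_ctrl" misc device registered above (the UBI_IOCATT ioctl),
 * which is what the mtd-utils 'ubiattach' tool drives; the exact command-line
 * invocation depends on the tool version and is not shown here.
 */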

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys the UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success, %-EBUSY if the UBI
 * device is busy and cannot be destroyed, and %-EINVAL if it does not exist.
 *
 * Note, invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err(ubi, "%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
#ifdef CONFIG_MTD_UBI_FASTMAP
	/*
	 * If we don't write a new fastmap at detach time we lose all
	 * EC updates that have been made since the last written fastmap.
	 * In case of fastmap debugging we omit the update to simulate an
	 * unclean shutdown.
	 */
	if (!ubi_dbg_chk_fastmap(ubi))
		ubi_update_fastmap(ubi);
#endif
	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

#ifdef CONFIG_MTD_UBI_FASTMAP
	cancel_work_sync(&ubi->fm_work);
#endif
	ubi_debugfs_exit_dev(ubi);
	uif_close(ubi);

	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
	put_mtd_device(ubi->mtd);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character node device path.
 * Returns the MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, minor;
	struct path path;
	struct kstat stat;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
	path_put(&path);
	if (err)
		return ERR_PTR(err);

	/* The MTD device number is defined by the major / minor numbers */
	if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
		return ERR_PTR(-EINVAL);

	minor = MINOR(stat.rdev);

	if (minor & 1)
		/*
		 * Odd minor numbers belong to the read-only "/dev/mtdrX"
		 * devices, which UBI does not support attaching through.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}
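
/*
 * Editorial example for the minor-number mapping above (assuming the standard
 * mtdchar numbering): "/dev/mtd3" has minor 6, which is even, so the function
 * returns get_mtd_device(NULL, 6 / 2), i.e. MTD device 3, while the read-only
 * "/dev/mtdr3" has odd minor 7 and is rejected with -EINVAL.
 */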

/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open an MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number; if that fails, it is
 * treated as an MTD device name, and if that fails too, it is treated as an
 * MTD character device node path. Returns an MTD device description object in
 * case of success and a negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * an MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}
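
/*
 * Editorial example: open_mtd_device() accepts "4" (an MTD device number),
 * "rootfs" (an MTD device name) or "/dev/mtd4" (a character device node
 * path); the three spellings above are illustrative, not taken from any
 * particular system.
 */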

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that the EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		pr_err("UBI error: too many MTD devices, maximum is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	err = class_register(&ubi_class);
	if (err < 0)
		return err;

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		pr_err("UBI error: cannot register device\n");
		goto out;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab) {
		err = -ENOMEM;
		goto out_dev_unreg;
	}

	err = ubi_debugfs_init();
	if (err)
		goto out_slab;

	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			pr_err("UBI error: cannot open mtd %s, error %d\n",
			       p->name, err);
			/* See comment below re-ubi_is_module(). */
			if (ubi_is_module())
				goto out_detach;
			continue;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
					 p->vid_hdr_offs, p->max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			pr_err("UBI error: cannot attach mtd%d\n",
			       mtd->index);
			put_mtd_device(mtd);

			/*
			 * Originally UBI stopped initializing on any error.
			 * However, later on it was found out that this
			 * behavior is not very good when UBI is compiled into
			 * the kernel and the MTD devices to attach are passed
			 * through the command line: a UBI failure stopped the
			 * whole boot sequence.
			 *
			 * To fix this, the behavior was changed for the
			 * non-module case, but the old behavior was preserved
			 * for the module case, just for compatibility. This
			 * is a little inconsistent, though.
			 */
			if (ubi_is_module())
				goto out_detach;
		}
	}

	err = ubiblock_init();
	if (err) {
		pr_err("UBI error: block: cannot initialize, error %d\n", err);

		/* See comment above re-ubi_is_module(). */
		if (ubi_is_module())
			goto out_detach;
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
out_slab:
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out:
	class_unregister(&ubi_class);
	pr_err("UBI error: cannot initialize UBI, error %d\n", err);
	return err;
}
late_initcall(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	ubiblock_exit();

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_unregister(&ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns the positive resulting integer in case of success and
 * a negative error code in case of failure.
 */
static int bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
		fallthrough;
	case 'M':
		result *= 1024;
		fallthrough;
	case 'K':
		result *= 1024;
		break;
	case '\0':
		break;
	default:
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	return result;
}
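
/*
 * Editorial examples for the suffix handling above: "1984" yields 1984,
 * "64K" yields 65536 and "2M" yields 2097152; the fallthrough chain makes
 * "1G" multiply by 1024 three times (1073741824).
 */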

/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse
 * @kp: not used
 *
 * This function returns zero in case of success and a negative error code in
 * case of error.
 */
static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[MTD_PARAM_MAX_COUNT], *token;

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		pr_err("UBI error: too many parameters, max. is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
		       val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
		return 0;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	if (pbuf) {
		pr_err("UBI error: too many arguments at \"%s\"\n", val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	token = tokens[1];
	if (token) {
		p->vid_hdr_offs = bytes_str_to_int(token);

		if (p->vid_hdr_offs < 0)
			return p->vid_hdr_offs;
	}

	token = tokens[2];
	if (token) {
		int err = kstrtoint(token, 10, &p->max_beb_per1024);

		if (err) {
			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
			       token);
			return -EINVAL;
		}
	}

	token = tokens[3];
	if (token) {
		int err = kstrtoint(token, 10, &p->ubi_num);

		if (err) {
			pr_err("UBI error: bad value for ubi_num parameter: %s\n",
			       token);
			return -EINVAL;
		}
	} else
		p->ubi_num = UBI_DEV_NUM_AUTO;

	mtd_devs += 1;
	return 0;
}
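
/*
 * Editorial worked example (hypothetical parameter): "mtd=/dev/mtd1,2048,20,3"
 * is split into tokens "/dev/mtd1", "2048", "20" and "3", giving
 * name = "/dev/mtd1", vid_hdr_offs = 2048, max_beb_per1024 = 20 and
 * ubi_num = 3; with fewer tokens the omitted fields keep their defaults.
 */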

module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 0400);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies the UBI VID header position; the default position is used if it is 0.\n"
		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks; the default ("
		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") is used if it is 0.\n"
		      "Optional \"ubi_num\" parameter specifies the UBI device number to assign to the newly created UBI device (assigned automatically by default).\n"
		      "\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling\n"
		      "\t(e.g. if the NAND *chip* has 4096 PEBs, 100 will be reserved for this UBI device).\n"
		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5, using default values for the other fields.");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
module_param(fm_debug, bool, 0);
MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");