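/*
 * Read-only block device layer on top of UBI volumes.
 *
 * A block device is created for each selected UBI volume; read requests are
 * dispatched through blk-mq to a per-device workqueue and mapped onto LEB
 * reads. Writes are rejected with -EROFS.
 */
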
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

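/* Maximum number of supported devices */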
#define UBIBLOCK_MAX_DEVICES 32

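/* Maximum length of the 'block=' parameter */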
#define UBIBLOCK_PARAM_LEN 63

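/* Maximum number of comma-separated items in the 'block=' parameter */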
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
        int ubi_num;
        int vol_id;
        char name[UBIBLOCK_PARAM_LEN+1];
};

struct ubiblock_pdu {
        struct work_struct work;
        struct ubi_sgl usgl;
};

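/* Number of entries used in the ubiblock_param array */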
static int ubiblock_devs __initdata;

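/* 'block=' parameter specifications collected at module load time */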
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
        struct ubi_volume_desc *desc;
        int ubi_num;
        int vol_id;
        int refcnt;
        int leb_size;

        struct gendisk *gd;
        struct request_queue *rq;

        struct workqueue_struct *wq;

        struct mutex dev_mutex;
        struct list_head list;
        struct blk_mq_tag_set tag_set;
};

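/* Linked list of all ubiblock instances */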
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
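/* Protects ubiblock_devices */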
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
                                     const struct kernel_param *kp)
{
        int i, ret;
        size_t len;
        struct ubiblock_param *param;
        char buf[UBIBLOCK_PARAM_LEN];
        char *pbuf = &buf[0];
        char *tokens[UBIBLOCK_PARAM_COUNT];

        if (!val)
                return -EINVAL;

        len = strnlen(val, UBIBLOCK_PARAM_LEN);
        if (len == 0) {
                pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
                return 0;
        }

        if (len == UBIBLOCK_PARAM_LEN) {
                pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
                       val, UBIBLOCK_PARAM_LEN);
                return -EINVAL;
        }

        strcpy(buf, val);

        /* Get rid of the final newline */
        if (buf[len - 1] == '\n')
                buf[len - 1] = '\0';

        for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
                tokens[i] = strsep(&pbuf, ",");

        param = &ubiblock_param[ubiblock_devs];
        if (tokens[1]) {
                /* Two parameters: can be either ubi, vol_id or ubi, vol_name */
                ret = kstrtoint(tokens[0], 10, &param->ubi_num);
                if (ret < 0)
                        return -EINVAL;

                /* Second param can be a number or a name */
                ret = kstrtoint(tokens[1], 10, &param->vol_id);
                if (ret < 0) {
                        param->vol_id = -1;
                        strcpy(param->name, tokens[1]);
                }

        } else {
                /* One parameter: must be device path */
                strcpy(param->name, tokens[0]);
                param->ubi_num = -1;
                param->vol_id = -1;
        }

        ubiblock_devs++;

        return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
        .set = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
                        "Multiple \"block\" parameters may be specified.\n"
                        "UBI volumes may be specified by their number, name, or path to the device node.\n"
                        "Examples\n"
                        "Using the UBI volume path:\n"
                        "ubi.block=/dev/ubi0_0\n"
                        "Using the UBI device, and the volume name:\n"
                        "ubi.block=0,rootfs\n"
                        "Using both UBI device number and UBI volume number:\n"
                        "ubi.block=0,0\n");

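/* Find a ubiblock device by UBI device and volume id; devices_mutex must be held */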
static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
        struct ubiblock *dev;

        list_for_each_entry(dev, &ubiblock_devices, list)
                if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
                        return dev;
        return NULL;
}

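/*
 * Serve a read request: convert the 512-byte sector position into a
 * (LEB, offset) pair and read LEB by LEB into the request's scatter-gather
 * list until the requested byte count is satisfied.
 */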
static int ubiblock_read(struct ubiblock_pdu *pdu)
{
        int ret, leb, offset, bytes_left, to_read;
        u64 pos;
        struct request *req = blk_mq_rq_from_pdu(pdu);
        struct ubiblock *dev = req->q->queuedata;

        to_read = blk_rq_bytes(req);
        pos = blk_rq_pos(req) << 9;

        /* Get LEB:offset address to read from */
        offset = do_div(pos, dev->leb_size);
        leb = pos;
        bytes_left = to_read;

        while (bytes_left) {
                /*
                 * We can only read one LEB at a time. Therefore if the read
                 * length is larger than one LEB size, we split the operation.
                 */
                if (offset + to_read > dev->leb_size)
                        to_read = dev->leb_size - offset;

                ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
                if (ret < 0)
                        return ret;

                bytes_left -= to_read;
                to_read = bytes_left;
                leb += 1;
                offset = 0;
        }
        return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
        struct ubiblock *dev = bdev->bd_disk->private_data;
        int ret;

        mutex_lock(&dev->dev_mutex);
        if (dev->refcnt > 0) {
                /*
                 * The volume is already open, just increase the reference
                 * counter.
                 */
                goto out_done;
        }

        /*
         * We want users to be aware they should only mount us as read-only.
         * It's just a paranoid check, as write requests will get rejected
         * in any case.
         */
        if (mode & FMODE_WRITE) {
                ret = -EROFS;
                goto out_unlock;
        }

        dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
        if (IS_ERR(dev->desc)) {
                dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
                        dev->ubi_num, dev->vol_id);
                ret = PTR_ERR(dev->desc);
                dev->desc = NULL;
                goto out_unlock;
        }

out_done:
        dev->refcnt++;
        mutex_unlock(&dev->dev_mutex);
        return 0;

out_unlock:
        mutex_unlock(&dev->dev_mutex);
        return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
        struct ubiblock *dev = gd->private_data;

        mutex_lock(&dev->dev_mutex);
        dev->refcnt--;
        if (dev->refcnt == 0) {
                ubi_close_volume(dev->desc);
                dev->desc = NULL;
        }
        mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        /* Some tools might require this information */
        geo->heads = 1;
        geo->cylinders = 1;
        geo->sectors = get_capacity(bdev->bd_disk);
        geo->start = 0;
        return 0;
}

static const struct block_device_operations ubiblock_ops = {
        .owner = THIS_MODULE,
        .open = ubiblock_open,
        .release = ubiblock_release,
        .getgeo = ubiblock_getgeo,
};

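/*
 * Workqueue handler: runs in process context, maps the request into the
 * per-request scatter-gather list and performs the actual UBI read.
 */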
static void ubiblock_do_work(struct work_struct *work)
{
        int ret;
        struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
        struct request *req = blk_mq_rq_from_pdu(pdu);
        struct req_iterator iter;
        struct bio_vec bvec;

        blk_mq_start_request(req);

        /*
         * It is safe to ignore the return value of blk_rq_map_sg() because
         * the number of sg entries is limited to UBI_MAX_SG_COUNT
         * and ubi_read_sg() will check that limit.
         */
        blk_rq_map_sg(req->q, req, pdu->usgl.sg);

        ret = ubiblock_read(pdu);

        rq_for_each_segment(bvec, req, iter)
                flush_dcache_page(bvec.bv_page);

        blk_mq_end_request(req, errno_to_blk_status(ret));
}

static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
                                      const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;
        struct ubiblock *dev = hctx->queue->queuedata;
        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

        switch (req_op(req)) {
        case REQ_OP_READ:
                ubi_sgl_init(&pdu->usgl);
                queue_work(dev->wq, &pdu->work);
                return BLK_STS_OK;
        default:
                return BLK_STS_IOERR;
        }
}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
                                 struct request *req, unsigned int hctx_idx,
                                 unsigned int numa_node)
{
        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

        sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
        INIT_WORK(&pdu->work, ubiblock_do_work);

        return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
        .queue_rq = ubiblock_queue_rq,
        .init_request = ubiblock_init_request,
};

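/*
 * Convert the volume's used size to 512-byte sectors. A trailing partial
 * sector is ignored with a warning; sizes that do not fit in sector_t are
 * rejected with -EFBIG.
 */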
static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
{
        u64 size = vi->used_bytes >> 9;

        if (vi->used_bytes % 512) {
                pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
                        vi->used_bytes - (size << 9));
        }

        if ((sector_t)size != size)
                return -EFBIG;

        *disk_capacity = size;

        return 0;
}

int ubiblock_create(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;
        struct gendisk *gd;
        u64 disk_capacity;
        int ret;

        ret = calc_disk_capacity(vi, &disk_capacity);
        if (ret)
                return ret;

        /* Check that the volume isn't already handled */
        mutex_lock(&devices_mutex);
        if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
                ret = -EEXIST;
                goto out_unlock;
        }

        dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
        if (!dev) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        mutex_init(&dev->dev_mutex);

        dev->ubi_num = vi->ubi_num;
        dev->vol_id = vi->vol_id;
        dev->leb_size = vi->usable_leb_size;

        dev->tag_set.ops = &ubiblock_mq_ops;
        dev->tag_set.queue_depth = 64;
        dev->tag_set.numa_node = NUMA_NO_NODE;
        dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
        dev->tag_set.driver_data = dev;
        dev->tag_set.nr_hw_queues = 1;

        ret = blk_mq_alloc_tag_set(&dev->tag_set);
        if (ret) {
                /* dev->gd is not set up yet, so don't use dev_err() here */
                pr_err("UBI: block: blk_mq_alloc_tag_set failed\n");
                goto out_free_dev;
        }

        /* Initialize the gendisk of this ubiblock device */
        gd = blk_mq_alloc_disk(&dev->tag_set, dev);
        if (IS_ERR(gd)) {
                ret = PTR_ERR(gd);
                goto out_free_tags;
        }

        gd->fops = &ubiblock_ops;
        gd->major = ubiblock_major;
        gd->minors = 1;
        gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
        if (gd->first_minor < 0) {
                dev_err(disk_to_dev(gd),
                        "block: dynamic minor allocation failed");
                ret = -ENODEV;
                goto out_cleanup_disk;
        }
        gd->flags |= GENHD_FL_NO_PART;
        gd->private_data = dev;
        sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
        set_capacity(gd, disk_capacity);
        dev->gd = gd;

        dev->rq = gd->queue;
        blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

        /*
         * Create one workqueue per volume (per registered block device).
         * Remember workqueues are cheap, they're not threads.
         */
        dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
        if (!dev->wq) {
                ret = -ENOMEM;
                goto out_remove_minor;
        }

        list_add_tail(&dev->list, &ubiblock_devices);

        /* Must be the last step: anyone can call file ops from now on */
        ret = add_disk(dev->gd);
        if (ret)
                goto out_destroy_wq;

        dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
                 dev->ubi_num, dev->vol_id, vi->name);
        mutex_unlock(&devices_mutex);
        return 0;

out_destroy_wq:
        list_del(&dev->list);
        destroy_workqueue(dev->wq);
out_remove_minor:
        idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
        put_disk(dev->gd);
out_free_tags:
        blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
        kfree(dev);
out_unlock:
        mutex_unlock(&devices_mutex);

        return ret;
}

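/* Tear down the disk, workqueue and blk-mq resources; called under devices_mutex */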
static void ubiblock_cleanup(struct ubiblock *dev)
{
        /* Stop new requests to arrive */
        del_gendisk(dev->gd);
        /* Flush pending work */
        destroy_workqueue(dev->wq);
        /* Finally destroy the blk queue */
        dev_info(disk_to_dev(dev->gd), "released");
        put_disk(dev->gd);
        blk_mq_free_tag_set(&dev->tag_set);
        idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;
        int ret;

        mutex_lock(&devices_mutex);
        dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
        if (!dev) {
                ret = -ENODEV;
                goto out_unlock;
        }

        /* Found a device, let's lock it so we can check if it's busy */
        mutex_lock(&dev->dev_mutex);
        if (dev->refcnt > 0) {
                ret = -EBUSY;
                goto out_unlock_dev;
        }

        /* Remove from device list */
        list_del(&dev->list);
        ubiblock_cleanup(dev);
        mutex_unlock(&dev->dev_mutex);
        mutex_unlock(&devices_mutex);

        kfree(dev);
        return 0;

out_unlock_dev:
        mutex_unlock(&dev->dev_mutex);
out_unlock:
        mutex_unlock(&devices_mutex);
        return ret;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;
        u64 disk_capacity;
        int ret;

        /*
         * Need to lock the device list until we stop using the device,
         * otherwise the device struct might get released in
         * 'ubiblock_remove()'.
         */
        mutex_lock(&devices_mutex);
        dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
        if (!dev) {
                mutex_unlock(&devices_mutex);
                return -ENODEV;
        }

        ret = calc_disk_capacity(vi, &disk_capacity);
        if (ret) {
                mutex_unlock(&devices_mutex);
                if (ret == -EFBIG) {
                        dev_warn(disk_to_dev(dev->gd),
                                 "the volume is too big (%d LEBs), cannot resize",
                                 vi->size);
                }
                return ret;
        }

        mutex_lock(&dev->dev_mutex);

        if (get_capacity(dev->gd) != disk_capacity) {
                set_capacity(dev->gd, disk_capacity);
                dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
                         vi->used_bytes);
        }
        mutex_unlock(&dev->dev_mutex);
        mutex_unlock(&devices_mutex);
        return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
                           unsigned long notification_type, void *ns_ptr)
{
        struct ubi_notification *nt = ns_ptr;

        switch (notification_type) {
        case UBI_VOLUME_ADDED:
                /*
                 * We want to enforce explicit block device creation for
                 * volumes, so we do nothing on a volume-added notification.
                 */
                break;
        case UBI_VOLUME_REMOVED:
                ubiblock_remove(&nt->vi);
                break;
        case UBI_VOLUME_RESIZED:
                ubiblock_resize(&nt->vi);
                break;
        case UBI_VOLUME_UPDATED:
                /*
                 * If the volume is static, a content update might mean the
                 * size (i.e. used_bytes) was also changed.
                 */
                if (nt->vi.vol_type == UBI_STATIC_VOLUME)
                        ubiblock_resize(&nt->vi);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
        .notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
        if (ubi_num == -1)
                /* No ubi num, name must be a vol device path */
                return ubi_open_volume_path(name, UBI_READONLY);
        else if (vol_id == -1)
                /* No vol_id, must be vol_name */
                return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
        else
                return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

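/*
 * Walk the 'block=' parameters gathered at module load and create a block
 * device for every volume that can be resolved and opened.
 */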
static void __init ubiblock_create_from_param(void)
{
        int i, ret = 0;
        struct ubiblock_param *p;
        struct ubi_volume_desc *desc;
        struct ubi_volume_info vi;

        /*
         * If there is an error creating one of the ubiblocks, continue on to
         * create the following ubiblocks. This helps in a circumstance where
         * the kernel command-line specifies multiple block devices and some
         * may be broken, but we still want the working ones to come up.
         */
        for (i = 0; i < ubiblock_devs; i++) {
                p = &ubiblock_param[i];

                desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
                if (IS_ERR(desc)) {
                        pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
                               p->ubi_num, p->vol_id, PTR_ERR(desc));
                        continue;
                }

                ubi_get_volume_info(desc, &vi);
                ubi_close_volume(desc);

                ret = ubiblock_create(&vi);
                if (ret) {
                        pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
                               vi.name, p->ubi_num, p->vol_id, ret);
                        continue;
                }
        }
}

static void ubiblock_remove_all(void)
{
        struct ubiblock *next;
        struct ubiblock *dev;

        mutex_lock(&devices_mutex);
        list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
                /* The module is being forcefully removed */
                WARN_ON(dev->desc);
                /* Remove from device list */
                list_del(&dev->list);
                ubiblock_cleanup(dev);
                kfree(dev);
        }
        mutex_unlock(&devices_mutex);
}

int __init ubiblock_init(void)
{
        int ret;

        ubiblock_major = register_blkdev(0, "ubiblock");
        if (ubiblock_major < 0)
                return ubiblock_major;

        /*
         * Attach block devices from the 'block=' module parameters.
         * Even if one block device in the parameter list fails to come up,
         * still allow the module to load and leave any others up.
         */
        ubiblock_create_from_param();

        /*
         * Block devices are only created upon user requests, so we ignore
         * existing volumes.
         */
        ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
        if (ret)
                goto err_unreg;
        return 0;

err_unreg:
        unregister_blkdev(ubiblock_major, "ubiblock");
        ubiblock_remove_all();
        return ret;
}

void __exit ubiblock_exit(void)
{
        ubi_unregister_volume_notifier(&ubiblock_notifier);
        ubiblock_remove_all();
        unregister_blkdev(ubiblock_major, "ubiblock");
}