// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

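/* Last reference is gone: tear down the disk, the tag set and the device. */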
static void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        put_disk(dev->disk);
        blk_mq_free_tag_set(dev->tag_set);
        kfree(dev->tag_set);
        list_del(&dev->list);
        kfree(dev);
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        kref_put(&dev->ref, blktrans_dev_release);
}

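/*
 * Handle one request by translating it into tr->flush()/discard()/
 * readsect()/writesect() calls, one sector of tr->blksize bytes at a time.
 */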
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
                                        struct mtd_blktrans_dev *dev,
                                        struct request *req)
{
        struct req_iterator iter;
        struct bio_vec bvec;
        unsigned long block, nsect;
        char *buf;

        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

        switch (req_op(req)) {
        case REQ_OP_FLUSH:
                if (tr->flush(dev))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        case REQ_OP_DISCARD:
                if (tr->discard(dev, block, nsect))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        case REQ_OP_READ:
                buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
                        if (tr->readsect(dev, block, buf)) {
                                kunmap(bio_page(req->bio));
                                return BLK_STS_IOERR;
                        }
                }
                kunmap(bio_page(req->bio));

                rq_for_each_segment(bvec, req, iter)
                        flush_dcache_page(bvec.bv_page);
                return BLK_STS_OK;
        case REQ_OP_WRITE:
                if (!tr->writesect)
                        return BLK_STS_IOERR;

                rq_for_each_segment(bvec, req, iter)
                        flush_dcache_page(bvec.bv_page);

                buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
                        if (tr->writesect(dev, block, buf)) {
                                kunmap(bio_page(req->bio));
                                return BLK_STS_IOERR;
                        }
                }
                kunmap(bio_page(req->bio));
                return BLK_STS_OK;
        default:
                return BLK_STS_IOERR;
        }
}

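/*
 * Polled by a translation layer's background method: a nonzero return asks
 * the background work to yield.
 */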
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
        struct request *rq;

        rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
        if (rq) {
                list_del_init(&rq->queuelist);
                blk_mq_start_request(rq);
                return rq;
        }

        return NULL;
}

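/*
 * Drain the per-device request list.  queue_lock is dropped while each
 * request (or the translation layer's background work) runs under
 * dev->lock, and re-taken before the next request is picked up.
 */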
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
        __releases(&dev->queue_lock)
        __acquires(&dev->queue_lock)
{
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request *req = NULL;
        int background_done = 0;

        while (1) {
                blk_status_t res;

                dev->bg_stop = false;
                if (!req && !(req = mtd_next_request(dev))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(&dev->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(&dev->queue_lock);
                                /*
                                 * Do background processing just once per
                                 * idle period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        break;
                }

                spin_unlock_irq(&dev->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
                        __blk_mq_end_request(req, res);
                        req = NULL;
                }

                background_done = 0;
                cond_resched();
                spin_lock_irq(&dev->queue_lock);
        }
}

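/*
 * blk-mq ->queue_rq handler: queue the request and drain the list
 * synchronously.  The tag set is BLK_MQ_F_BLOCKING, so sleeping here is
 * allowed.
 */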
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
{
        struct mtd_blktrans_dev *dev;

        dev = hctx->queue->queuedata;
        if (!dev) {
                blk_mq_start_request(bd->rq);
                return BLK_STS_IOERR;
        }

        spin_lock_irq(&dev->queue_lock);
        list_add_tail(&bd->rq->queuelist, &dev->rq_list);
        mtd_blktrans_work(dev);
        spin_unlock_irq(&dev->queue_lock);

        return BLK_STS_OK;
}

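/*
 * The first open pins the module and the underlying MTD device; every open
 * takes a kref that blktrans_release() drops again.
 */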
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
        int ret = 0;

        kref_get(&dev->ref);

        mutex_lock(&dev->lock);

        if (dev->open)
                goto unlock;

        __module_get(dev->tr->owner);

        if (!dev->mtd)
                goto unlock;

        if (dev->tr->open) {
                ret = dev->tr->open(dev);
                if (ret)
                        goto error_put;
        }

        ret = __get_mtd_device(dev->mtd);
        if (ret)
                goto error_release;
        dev->file_mode = mode;

unlock:
        dev->open++;
        mutex_unlock(&dev->lock);
        return ret;

error_release:
        if (dev->tr->release)
                dev->tr->release(dev);
error_put:
        module_put(dev->tr->owner);
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = disk->private_data;

        mutex_lock(&dev->lock);

        if (--dev->open)
                goto unlock;

        module_put(dev->tr->owner);

        if (dev->mtd) {
                if (dev->tr->release)
                        dev->tr->release(dev);
                __put_mtd_device(dev->mtd);
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
        int ret = -ENXIO;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
        mutex_unlock(&dev->lock);
        return ret;
}

static const struct block_device_operations mtd_block_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .getgeo         = blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
        .queue_rq       = mtd_queue_rq,
};

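/*
 * Register one translated block device: pick a device number, set up the
 * blk-mq queue and gendisk, and publish the disk.  Caller holds
 * mtd_table_mutex.
 */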
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

        lockdep_assert_held(&mtd_table_mutex);

        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number. */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26))
                return ret;

        list_add_tail(&new->list, &tr->devs);
 added:

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        ret = -ENOMEM;
        new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
        if (!new->tag_set)
                goto out_list_del;

        ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
                        BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
        if (ret)
                goto out_kfree_tag_set;

        /* Create gendisk */
        gd = blk_mq_alloc_disk(new->tag_set, new);
        if (IS_ERR(gd)) {
                ret = PTR_ERR(gd);
                goto out_free_tag_set;
        }

        new->disk = gd;
        new->rq = new->disk->queue;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->minors = 1 << tr->part_bits;
        gd->fops = &mtd_block_ops;

        if (tr->part_bits) {
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        } else {
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);
                gd->flags |= GENHD_FL_NO_PART;
        }

        set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        INIT_LIST_HEAD(&new->rq_list);

        if (tr->flush)
                blk_queue_write_cache(new->rq, true, false);

        blk_queue_logical_block_size(new->rq, tr->blksize);

        blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

        if (tr->discard) {
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
                new->rq->limits.discard_granularity = tr->blksize;
        }

        gd->queue = new->rq;

        if (new->readonly)
                set_disk_ro(gd, 1);

        ret = device_add_disk(&new->mtd->dev, gd, NULL);
        if (ret)
                goto out_cleanup_disk;

        if (new->disk_attributes) {
                ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                                         new->disk_attributes);
                WARN_ON(ret);
        }
        return 0;

out_cleanup_disk:
        put_disk(new->disk);
out_free_tag_set:
        blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
        kfree(new->tag_set);
out_list_del:
        list_del(&new->list);
        return ret;
}

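/*
 * Tear down a translated block device: remove the disk so no new requests
 * arrive, flush what is in flight, then detach the MTD device.  Caller
 * holds mtd_table_mutex.
 */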
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        lockdep_assert_held(&mtd_table_mutex);

        if (old->disk_attributes)
                sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
                                                old->disk_attributes);

        /* Stop new requests to arrive */
        del_gendisk(old->disk);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        spin_unlock_irqrestore(&old->queue_lock, flags);

        /* freeze+quiesce queue to ensure all requests are flushed */
        blk_mq_freeze_queue(old->rq);
        blk_mq_quiesce_queue(old->rq);
        blk_mq_unquiesce_queue(old->rq);
        blk_mq_unfreeze_queue(old->rq);

        /* If the device is currently open, tell trans driver to close it,
           then put mtd device, and don't touch it again */
        mutex_lock(&old->lock);
        if (old->open) {
                if (old->tr->release)
                        old->tr->release(old);
                __put_mtd_device(old->mtd);
        }

        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}

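/*
 * MTD notifier hooks: create and destroy translated devices as the
 * underlying MTD devices come and go.
 */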
static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

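/*
 * Register a translation layer: grab the block major, add the ops to
 * blktrans_majors and create devices for all MTD devices already present.
 */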
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from biting
           us. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                return ret;
        }

        if (ret)
                tr->major = ret;

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);

        mutex_lock(&mtd_table_mutex);
        list_add(&tr->list, &blktrans_majors);
        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);
        mutex_unlock(&mtd_table_mutex);
        return 0;
}

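/*
 * Unregister a translation layer and remove all of its devices.
 */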
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        mutex_unlock(&mtd_table_mutex);
        unregister_blkdev(tr->major, tr->name);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");