// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Direct MTD block device access
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
#include <linux/major.h>
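
/*
 * Per-device state: the underlying block translation device plus a
 * single erase-block-sized write-back cache (see the "Cache stuff"
 * comment below for the rationale).
 */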
struct mtdblk_dev {
	struct mtd_blktrans_dev mbd;
	int count;
	struct mutex cache_mutex;
	unsigned char *cache_data;
	unsigned long cache_offset;
	unsigned int cache_size;
	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};

/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request.  To avoid over-erasing flash
 * sectors and to speed things up, we locally cache a whole flash sector
 * while it is being written to until a different sector is required.
 */

static int erase_write (struct mtd_info *mtd, unsigned long pos,
			unsigned int len, const char *buf)
{
	struct erase_info erase;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */
	erase.addr = pos;
	erase.len = len;

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
				     "on \"%s\" failed\n",
			pos, len, mtd->name);
		return ret;
	}

	/*
	 * Next, write the data to flash.
	 */
	ret = mtd_write(mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}
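
/*
 * Write the cached sector back to the device with an erase + write
 * cycle.  Nothing is done unless the cache is dirty.
 */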
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	pr_debug("mtdblock: writing cached data for \"%s\" "
		 "at 0x%lx, size 0x%x\n", mtd->name,
		 mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write (mtd, mtdblk->cache_offset,
			   mtdblk->cache_size, mtdblk->cache_data);

	/*
	 * Here we could arguably set the cache state to STATE_CLEAN.
	 * However this could lead to inconsistency since we will not
	 * be notified if this content is altered on the flash by other
	 * means.  Let's declare it empty and leave buffering tasks to
	 * the buffer cache instead.
	 *
	 * If this cache_offset points to a bad block, data cannot be
	 * written to the device.  Clear cache_state to avoid writing to
	 * bad blocks repeatedly.
	 */
	if (ret == 0 || ret == -EIO)
		mtdblk->cache_state = STATE_EMPTY;
	return ret;
}
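
/*
 * Write through the sector cache.  Writes that cover a whole sector go
 * straight to erase_write(); partial writes are assembled in the cache
 * using read-modify-write, and the cached sector is only written back
 * once a different sector is needed (or on flush/release).
 */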
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
			    int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
		 mtd->name, pos, len);

	if (!sect_size)
		return mtd_write(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector.  Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write (mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = mtd_read(mtd, sect_start, sect_size,
					       &retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy (mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
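
/*
 * Read through the sector cache: data still sitting in the cache is
 * copied from there, everything else is read directly from the device.
 */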
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
		 mtd->name, pos, len);

	if (!sect_size)
		return mtd_read(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached.
		 * Read the requested amount of data from our internal
		 * cache if it contains what we want, otherwise we read
		 * the data directly from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy (buf, mtdblk->cache_data + offset, size);
		} else {
			ret = mtd_read(mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
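
/*
 * blktrans hooks.  The block translation layer works in fixed 512-byte
 * sectors, so the sector number is turned into a byte offset
 * (block << 9) before going through the cached helpers above.
 */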
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
			     unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

	return do_cached_read(mtdblk, block<<9, 512, buf);
}

static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

	if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
		/* Allocate the sector cache lazily, on the first write. */
		mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
		if (!mtdblk->cache_data)
			return -EINTR;
		/*
		 * XXX: -EINTR is an odd error for a failed allocation;
		 * -ENOMEM would be the more natural choice.
		 */
	}
	return do_cached_write(mtdblk, block<<9, 512, buf);
}
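
/*
 * The first open sets up the cache bookkeeping (the buffer itself is
 * allocated lazily on the first write); later opens only bump the use
 * count.
 */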
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_open\n");

	if (mtdblk->count) {
		mtdblk->count++;
		return 0;
	}

	if (mtd_type_is_nand(mbd->mtd))
		pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
			mbd->tr->name, mbd->mtd->name);

	/* OK, it is not open.  Create cache info for it. */
	mtdblk->count = 1;
	mutex_init(&mtdblk->cache_mutex);
	mtdblk->cache_state = STATE_EMPTY;
	if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
		mtdblk->cache_size = mbd->mtd->erasesize;
		mtdblk->cache_data = NULL;
	}

	pr_debug("ok\n");

	return 0;
}
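
/*
 * Every release writes back a dirty cached sector; the last release
 * additionally syncs the device (if it was opened for writing) and
 * frees the cache buffer.
 */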
static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_release\n");

	mutex_lock(&mtdblk->cache_mutex);
	write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);

	if (!--mtdblk->count) {
		/*
		 * It was the last usage.  Free the cache, but only sync if
		 * opened for writing.
		 */
		if (mbd->file_mode & FMODE_WRITE)
			mtd_sync(mbd->mtd);
		vfree(mtdblk->cache_data);
	}

	pr_debug("ok\n");
}
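
/*
 * Flush request from the block layer: write back the cached sector and
 * sync the underlying MTD device.
 */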
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
	int ret;

	mutex_lock(&mtdblk->cache_mutex);
	ret = write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);
	mtd_sync(dev->mtd);
	return ret;
}
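
/*
 * Called by the blktrans core for each MTD device that appears:
 * allocate the per-device structure, express the size in 512-byte
 * sectors and register the translation device.
 */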
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mbd.mtd = mtd;
	dev->mbd.devnum = mtd->index;

	dev->mbd.size = mtd->size >> 9;
	dev->mbd.tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		dev->mbd.readonly = 1;

	if (add_mtd_blktrans_dev(&dev->mbd))
		kfree(dev);
}

static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}
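
/*
 * Translation ops: 512-byte sectors under the MTD block major, with no
 * bits reserved for partitions in the minor number.
 */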
static struct mtd_blktrans_ops mtdblock_tr = {
	.name		= "mtdblock",
	.major		= MTD_BLOCK_MAJOR,
	.part_bits	= 0,
	.blksize	= 512,
	.open		= mtdblock_open,
	.flush		= mtdblock_flush,
	.release	= mtdblock_release,
	.readsect	= mtdblock_readsect,
	.writesect	= mtdblock_writesect,
	.add_mtd	= mtdblock_add_mtd,
	.remove_dev	= mtdblock_remove_dev,
	.owner		= THIS_MODULE,
};

module_mtd_blktrans(mtdblock_tr);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");