// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

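/*
 * Write the inode backing @bdev to disk, retrying while the inode keeps
 * getting re-dirtied; writeback failures get a ratelimited warning.
 */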
static void bdev_write_inode(struct block_device *bdev)
{
        struct inode *inode = bdev->bd_inode;
        int ret;

        spin_lock(&inode->i_lock);
        while (inode->i_state & I_DIRTY) {
                spin_unlock(&inode->i_lock);
                ret = write_inode_now(inode, true);
                if (ret)
                        pr_warn_ratelimited(
        "VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
                                bdev, ret);
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping_empty(mapping))
                return;

        invalidate_bh_lrus();
        truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages) {
                invalidate_bh_lrus();
                lru_add_drain_all();    /* make sure all lru add caches are flushed */
                invalidate_mapping_pages(mapping, 0, -1);
        }
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * with error if bdev has other exclusive owner (such as filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
                        loff_t lstart, loff_t lend)
{
        /*
         * If we don't hold exclusive handle for the device, upgrade to it
         * while we discard the buffer cache to avoid discarding page cache
         * of clean cached data we accidentally instantiated while doing the
         * discard.
         */
        if (!(mode & FMODE_EXCL)) {
                int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
                if (err)
                        goto invalidate;
        }

        truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
        if (!(mode & FMODE_EXCL))
                bd_abort_claiming(bdev, truncate_bdev_range);
        return 0;

invalidate:
        /*
         * Someone else has the handle exclusively open. Try invalidating
         * instead. The 'end' argument is inclusive so the rounding is safe.
         */
        return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
                                             lstart >> PAGE_SHIFT,
                                             lend >> PAGE_SHIFT);
}

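/*
 * Pick the initial block size: grow from the logical block size towards
 * PAGE_SIZE for as long as the device size stays a multiple of the
 * candidate size.
 */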
static void set_init_blocksize(struct block_device *bdev)
{
        unsigned int bsize = bdev_logical_block_size(bdev);
        loff_t size = i_size_read(bdev->bd_inode);

        while (bsize < PAGE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_logical_block_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is same as current */
        if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
                sync_blockdev(bdev);
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /* If we get here, we know size is power of two
         * and its value is between 512 and PAGE_SIZE */
        sb->s_blocksize = size;
        sb->s_blocksize_bits = blksize_bits(size);
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_logical_block_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

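/*
 * Kick off writeback of the block device's dirty page cache without
 * waiting for it to complete.
 */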
int sync_blockdev_nowait(struct block_device *bdev)
{
        if (!bdev)
                return 0;
        return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        if (!bdev)
                return 0;
        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

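/* Like sync_blockdev(), but limited to the inclusive byte range @lstart..@lend. */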
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
        return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
                        lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = sync_filesystem(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can unfreeze the frozen filesystem actually when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
 * actually.
 */
int freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (++bdev->bd_fsfreeze_count > 1)
                goto done;

        sb = get_active_super(bdev);
        if (!sb)
                goto sync;
        if (sb->s_op->freeze_super)
                error = sb->s_op->freeze_super(sb);
        else
                error = freeze_super(sb);
        deactivate_super(sb);

        if (error) {
                bdev->bd_fsfreeze_count--;
                goto done;
        }
        bdev->bd_fsfreeze_sb = sb;

sync:
        sync_blockdev(bdev);
done:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = -EINVAL;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count)
                goto out;

        error = 0;
        if (--bdev->bd_fsfreeze_count > 0)
                goto out;

        sb = bdev->bd_fsfreeze_sb;
        if (!sb)
                goto out;

        if (sb->s_op->thaw_super)
                error = sb->s_op->thaw_super(sb);
        else
                error = thaw_super(sb);
        if (error)
                bdev->bd_fsfreeze_count++;
        else
                bdev->bd_fsfreeze_sb = NULL;
out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
}
EXPORT_SYMBOL(thaw_bdev);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page to (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
                        struct page *page)
{
        const struct block_device_operations *ops = bdev->bd_disk->fops;
        int result = -EOPNOTSUPP;

        if (!ops->rw_page || bdev_get_integrity(bdev))
                return result;

        result = blk_queue_enter(bdev_get_queue(bdev), 0);
        if (result)
                return result;
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
                              REQ_OP_READ);
        blk_queue_exit(bdev_get_queue(bdev));
        return result;
}

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
                        struct page *page, struct writeback_control *wbc)
{
        int result;
        const struct block_device_operations *ops = bdev->bd_disk->fops;

        if (!ops->rw_page || bdev_get_integrity(bdev))
                return -EOPNOTSUPP;
        result = blk_queue_enter(bdev_get_queue(bdev), 0);
        if (result)
                return result;

        set_page_writeback(page);
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
                              REQ_OP_WRITE);
        if (result) {
                end_page_writeback(page);
        } else {
                clean_page_buffers(page);
                unlock_page(page);
        }
        blk_queue_exit(bdev_get_queue(bdev));
        return result;
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

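/*
 * Allocate an inode for the bdev pseudo-fs; the struct block_device is
 * embedded in the same bdev_inode allocation and starts out zeroed.
 */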
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

        if (!ei)
                return NULL;
        memset(&ei->bdev, 0, sizeof(ei->bdev));
        return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
        struct block_device *bdev = I_BDEV(inode);

        free_percpu(bdev->bd_stats);
        kfree(bdev->bd_meta_info);

        if (!bdev_is_partition(bdev)) {
                if (bdev->bd_disk && bdev->bd_disk->bdi)
                        bdi_put(bdev->bd_disk->bdi);
                kfree(bdev->bd_disk);
        }

        if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
                blk_free_ext_minor(MINOR(bdev->bd_dev));

        kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
        struct bdev_inode *ei = data;

        inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        invalidate_inode_buffers(inode);
        clear_inode(inode);
}

static const struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .free_inode = bdev_free_inode,
        .drop_inode = generic_delete_inode,
        .evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
        if (!ctx)
                return -ENOMEM;
        fc->s_iflags |= SB_I_CGROUPWB;
        ctx->ops = &bdev_sops;
        return 0;
}

static struct file_system_type bd_type = {
        .name = "bdev",
        .init_fs_context = bd_init_fs_context,
        .kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
        int err;
        static struct vfsmount *bd_mnt;

        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
                        init_once);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

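/*
 * Allocate a new block_device for @disk / @partno, backed by a fresh inode
 * on the bdev pseudo-fs.  Returns NULL on allocation failure.
 */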
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = new_inode(blockdev_superblock);
        if (!inode)
                return NULL;
        inode->i_mode = S_IFBLK;
        inode->i_rdev = 0;
        inode->i_data.a_ops = &def_blk_aops;
        mapping_set_gfp_mask(&inode->i_data, GFP_USER);

        bdev = I_BDEV(inode);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
        bdev->bd_queue = disk->queue;
        bdev->bd_stats = alloc_percpu(struct disk_stats);
        if (!bdev->bd_stats) {
                iput(inode);
                return NULL;
        }
        bdev->bd_disk = disk;
        return bdev;
}

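/*
 * Publish the device: record its dev_t and hash the backing inode so that
 * blkdev_get_no_open() can find it by device number.
 */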
void bdev_add(struct block_device *bdev, dev_t dev)
{
        bdev->bd_dev = dev;
        bdev->bd_inode->i_rdev = dev;
        bdev->bd_inode->i_ino = dev;
        insert_inode_hash(bdev->bd_inode);
}

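/* Total number of page-cache pages attached to block device inodes. */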
long nr_blockdev_pages(void)
{
        struct inode *inode;
        long ret = 0;

        spin_lock(&blockdev_superblock->s_inode_list_lock);
        list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
                ret += inode->i_mapping->nrpages;
        spin_unlock(&blockdev_superblock->s_inode_list_lock);

        return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
                         void *holder)
{
        if (bdev->bd_holder == holder)
                return true;     /* already a holder */
        else if (bdev->bd_holder != NULL)
                return false;    /* held by someone else */
        else if (whole == bdev)
                return true;     /* is a whole device which isn't held */

        else if (whole->bd_holder == bd_may_claim)
                return true;     /* is a partition of a device that is being partitioned */
        else if (whole->bd_holder != NULL)
                return false;    /* is a partition of a held device */
        else
                return true;     /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
        struct block_device *whole = bdev_whole(bdev);

        if (WARN_ON_ONCE(!holder))
                return -EINVAL;
retry:
        spin_lock(&bdev_lock);
        /* if someone else claimed, fail */
        if (!bd_may_claim(bdev, whole, holder)) {
                spin_unlock(&bdev_lock);
                return -EBUSY;
        }

        /* if claiming is already in progress, wait for it to finish */
        if (whole->bd_claiming) {
                wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
                DEFINE_WAIT(wait);

                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&bdev_lock);
                schedule();
                finish_wait(wq, &wait);
                goto retry;
        }

        /* yay, all mine */
        whole->bd_claiming = holder;
        spin_unlock(&bdev_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim);

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
        lockdep_assert_held(&bdev_lock);
        /* tell others that we're done */
        BUG_ON(whole->bd_claiming != holder);
        whole->bd_claiming = NULL;
        wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device.  Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
        struct block_device *whole = bdev_whole(bdev);

        spin_lock(&bdev_lock);
        BUG_ON(!bd_may_claim(bdev, whole, holder));
        /*
         * Note that for a whole device bd_holders will be incremented twice,
         * and bd_holder will be set to bd_may_claim before being set to holder
         */
        whole->bd_holders++;
        whole->bd_holder = bd_may_claim;
        bdev->bd_holders++;
        bdev->bd_holder = holder;
        bd_clear_claiming(whole, holder);
        spin_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can
 * be also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
        spin_lock(&bdev_lock);
        bd_clear_claiming(bdev_whole(bdev), holder);
        spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);

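/*
 * Called on the last close: sync the device's page cache, throw away all
 * cached pages and buffers, and write back the bdev inode.
 */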
static void blkdev_flush_mapping(struct block_device *bdev)
{
        WARN_ON_ONCE(bdev->bd_holders);
        sync_blockdev(bdev);
        kill_bdev(bdev);
        bdev_write_inode(bdev);
}

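/*
 * Open the whole device.  The first opener sets the initial block size, and
 * a pending GD_NEED_PART_SCAN triggers a partition rescan.
 */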
static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
        struct gendisk *disk = bdev->bd_disk;
        int ret;

        if (disk->fops->open) {
                ret = disk->fops->open(bdev, mode);
                if (ret) {
                        /* avoid ghost partitions on a removed medium */
                        if (ret == -ENOMEDIUM &&
                            test_bit(GD_NEED_PART_SCAN, &disk->state))
                                bdev_disk_changed(disk, true);
                        return ret;
                }
        }

        if (!atomic_read(&bdev->bd_openers))
                set_init_blocksize(bdev);
        if (test_bit(GD_NEED_PART_SCAN, &disk->state))
                bdev_disk_changed(disk, false);
        atomic_inc(&bdev->bd_openers);
        return 0;
}

static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
{
        if (atomic_dec_and_test(&bdev->bd_openers))
                blkdev_flush_mapping(bdev);
        if (bdev->bd_disk->fops->release)
                bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}

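/*
 * Open a partition: the whole device is opened first, and the open fails
 * with -ENXIO if the partition has no sectors.
 */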
static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
        struct gendisk *disk = part->bd_disk;
        int ret;

        if (atomic_read(&part->bd_openers))
                goto done;

        ret = blkdev_get_whole(bdev_whole(part), mode);
        if (ret)
                return ret;

        ret = -ENXIO;
        if (!bdev_nr_sectors(part))
                goto out_blkdev_put;

        disk->open_partitions++;
        set_init_blocksize(part);
done:
        atomic_inc(&part->bd_openers);
        return 0;

out_blkdev_put:
        blkdev_put_whole(bdev_whole(part), mode);
        return ret;
}

static void blkdev_put_part(struct block_device *part, fmode_t mode)
{
        struct block_device *whole = bdev_whole(part);

        if (!atomic_dec_and_test(&part->bd_openers))
                return;
        blkdev_flush_mapping(part);
        whole->bd_disk->open_partitions--;
        blkdev_put_whole(whole, mode);
}

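/*
 * Look up a block device by device number and take a reference on it
 * without opening it.  Returns NULL if no such device exists.
 */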
struct block_device *blkdev_get_no_open(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = ilookup(blockdev_superblock, dev);
        if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
                blk_request_module(dev);
                inode = ilookup(blockdev_superblock, dev);
                if (inode)
                        pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
        }
        if (!inode)
                return NULL;

        /* switch from the inode reference to a device mode one: */
        bdev = &BDEV_I(inode)->bdev;
        if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
                bdev = NULL;
        iput(inode);
        return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
        put_device(&bdev->bd_device);
}

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
        bool unblock_events = true;
        struct block_device *bdev;
        struct gendisk *disk;
        int ret;

        ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
                        MAJOR(dev), MINOR(dev),
                        ((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
                        ((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
        if (ret)
                return ERR_PTR(ret);

        bdev = blkdev_get_no_open(dev);
        if (!bdev)
                return ERR_PTR(-ENXIO);
        disk = bdev->bd_disk;

        if (mode & FMODE_EXCL) {
                ret = bd_prepare_to_claim(bdev, holder);
                if (ret)
                        goto put_blkdev;
        }

        disk_block_events(disk);

        mutex_lock(&disk->open_mutex);
        ret = -ENXIO;
        if (!disk_live(disk))
                goto abort_claiming;
        if (!try_module_get(disk->fops->owner))
                goto abort_claiming;
        if (bdev_is_partition(bdev))
                ret = blkdev_get_part(bdev, mode);
        else
                ret = blkdev_get_whole(bdev, mode);
        if (ret)
                goto put_module;
        if (mode & FMODE_EXCL) {
                bd_finish_claiming(bdev, holder);

                /*
                 * Block event polling for write claims if requested.  Any write
                 * holder makes the write_holder state stick until all are
                 * released.  This is good enough and tracking individual
                 * writeable reference is too fragile given the way @mode is
                 * used in blkdev_get/put().
                 */
                if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
                    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
                        bdev->bd_write_holder = true;
                        unblock_events = false;
                }
        }
        mutex_unlock(&disk->open_mutex);

        if (unblock_events)
                disk_unblock_events(disk);
        return bdev;
put_module:
        module_put(disk->fops->owner);
abort_claiming:
        if (mode & FMODE_EXCL)
                bd_abort_claiming(bdev, holder);
        mutex_unlock(&disk->open_mutex);
        disk_unblock_events(disk);
put_blkdev:
        blkdev_put_no_open(bdev);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path.  If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens
 * may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
                                        void *holder)
{
        struct block_device *bdev;
        dev_t dev;
        int error;

        error = lookup_bdev(path, &dev);
        if (error)
                return ERR_PTR(error);

        bdev = blkdev_get_by_dev(dev, mode, holder);
        if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
                blkdev_put(bdev, mode);
                return ERR_PTR(-EACCES);
        }

        return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
        struct gendisk *disk = bdev->bd_disk;

        /*
         * Sync early if it looks like we're the last one.  If someone else
         * opens the block device between now and the decrement of bd_openers
         * then we did a sync that we didn't need to, but that's not the end
         * of the world and we want to avoid long (could be several minute)
         * syncs while holding the mutex.
         */
        if (atomic_read(&bdev->bd_openers) == 1)
                sync_blockdev(bdev);

        mutex_lock(&disk->open_mutex);
        if (mode & FMODE_EXCL) {
                struct block_device *whole = bdev_whole(bdev);
                bool bdev_free;

                /*
                 * Release a claim on the device.  The holder fields
                 * are protected with bdev_lock.  open_mutex is to
                 * synchronize disk_holder unlinking.
                 */
                spin_lock(&bdev_lock);

                WARN_ON_ONCE(--bdev->bd_holders < 0);
                WARN_ON_ONCE(--whole->bd_holders < 0);

                if ((bdev_free = !bdev->bd_holders))
                        bdev->bd_holder = NULL;
                if (!whole->bd_holders)
                        whole->bd_holder = NULL;

                spin_unlock(&bdev_lock);

                /*
                 * If this was the last claim, remove holder link and
                 * unblock evpoll if it was a write holder.
                 */
                if (bdev_free && bdev->bd_write_holder) {
                        disk_unblock_events(disk);
                        bdev->bd_write_holder = false;
                }
        }

        /*
         * Trigger event checking and tell drivers to flush MEDIA_CHANGE
         * event.  This is to ensure detection of media removal commanded
         * from userland - e.g. eject(1).
         */
        disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

        if (bdev_is_partition(bdev))
                blkdev_put_part(bdev, mode);
        else
                blkdev_put_whole(bdev, mode);
        mutex_unlock(&disk->open_mutex);

        module_put(disk->fops->owner);
        blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
        struct inode *inode;
        struct path path;
        int error;

        if (!pathname || !*pathname)
                return -EINVAL;

        error = kern_path(pathname, LOOKUP_FOLLOW, &path);
        if (error)
                return error;

        inode = d_backing_inode(path.dentry);
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto out_path_put;
        error = -EACCES;
        if (!may_open_dev(&path))
                goto out_path_put;

        *dev = inode->i_rdev;
        error = 0;
out_path_put:
        path_put(&path);
        return error;
}
EXPORT_SYMBOL(lookup_bdev);

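/*
 * Invalidate everything cached for the device: the dentries and inodes of
 * any filesystem mounted on it, plus the device's own page cache.
 */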
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
        struct super_block *sb = get_super(bdev);
        int res = 0;

        if (sb) {
                /*
                 * no need to lock the super, get_super holds the
                 * read mutex so the filesystem cannot go away
                 * under us (->put_super runs with the write lock
                 * hold).
                 */
                shrink_dcache_sb(sb);
                res = invalidate_inodes(sb, kill_dirty);
                drop_super(sb);
        }
        invalidate_bdev(bdev);
        return res;
}
EXPORT_SYMBOL(__invalidate_device);

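/*
 * Write out or wait upon the dirty page cache of all open block devices:
 * @wait false starts writeback, @wait true waits for it to finish.
 */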
void sync_bdevs(bool wait)
{
        struct inode *inode, *old_inode = NULL;

        spin_lock(&blockdev_superblock->s_inode_list_lock);
        list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
                struct address_space *mapping = inode->i_mapping;
                struct block_device *bdev;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
                    mapping->nrpages == 0) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&blockdev_superblock->s_inode_list_lock);
                /*
                 * We hold a reference to 'inode' so it couldn't have been
                 * removed from s_inodes list while we dropped the
                 * s_inode_list_lock.  We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it under
                 * s_inode_list_lock.  So we keep the reference and iput it
                 * later.
                 */
                iput(old_inode);
                old_inode = inode;
                bdev = I_BDEV(inode);

                mutex_lock(&bdev->bd_disk->open_mutex);
                if (!atomic_read(&bdev->bd_openers)) {
                        ; /* skip */
                } else if (wait) {
                        /*
                         * We keep the error status of individual mapping so
                         * that applications can catch the writeback error
                         * using fsync(2). See filemap_fdatawait_keep_errors()
                         * for details.
                         */
                        filemap_fdatawait_keep_errors(inode->i_mapping);
                } else {
                        filemap_fdatawrite(inode->i_mapping);
                }
                mutex_unlock(&bdev->bd_disk->open_mutex);

                spin_lock(&blockdev_superblock->s_inode_list_lock);
        }
        spin_unlock(&blockdev_superblock->s_inode_list_lock);
        iput(old_inode);
}