0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  Copyright (C) 1991, 1992  Linus Torvalds
0004  *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
0005  *  Copyright (C) 2016 - 2020 Christoph Hellwig
0006  */
0007 
0008 #include <linux/init.h>
0009 #include <linux/mm.h>
0010 #include <linux/slab.h>
0011 #include <linux/kmod.h>
0012 #include <linux/major.h>
0013 #include <linux/device_cgroup.h>
0014 #include <linux/blkdev.h>
0015 #include <linux/blk-integrity.h>
0016 #include <linux/backing-dev.h>
0017 #include <linux/module.h>
0018 #include <linux/blkpg.h>
0019 #include <linux/magic.h>
0020 #include <linux/buffer_head.h>
0021 #include <linux/swap.h>
0022 #include <linux/writeback.h>
0023 #include <linux/mount.h>
0024 #include <linux/pseudo_fs.h>
0025 #include <linux/uio.h>
0026 #include <linux/namei.h>
0027 #include <linux/part_stat.h>
0028 #include <linux/uaccess.h>
0029 #include "../fs/internal.h"
0030 #include "blk.h"
0031 
0032 struct bdev_inode {
0033     struct block_device bdev;
0034     struct inode vfs_inode;
0035 };
0036 
0037 static inline struct bdev_inode *BDEV_I(struct inode *inode)
0038 {
0039     return container_of(inode, struct bdev_inode, vfs_inode);
0040 }
0041 
0042 struct block_device *I_BDEV(struct inode *inode)
0043 {
0044     return &BDEV_I(inode)->bdev;
0045 }
0046 EXPORT_SYMBOL(I_BDEV);
0047 
0048 static void bdev_write_inode(struct block_device *bdev)
0049 {
0050     struct inode *inode = bdev->bd_inode;
0051     int ret;
0052 
0053     spin_lock(&inode->i_lock);
0054     while (inode->i_state & I_DIRTY) {
0055         spin_unlock(&inode->i_lock);
0056         ret = write_inode_now(inode, true);
0057         if (ret)
0058             pr_warn_ratelimited(
0059     "VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
0060                 bdev, ret);
0061         spin_lock(&inode->i_lock);
0062     }
0063     spin_unlock(&inode->i_lock);
0064 }
0065 
0066 /* Kill _all_ buffers and pagecache, dirty or not. */
0067 static void kill_bdev(struct block_device *bdev)
0068 {
0069     struct address_space *mapping = bdev->bd_inode->i_mapping;
0070 
0071     if (mapping_empty(mapping))
0072         return;
0073 
0074     invalidate_bh_lrus();
0075     truncate_inode_pages(mapping, 0);
0076 }
0077 
0078 /* Invalidate clean unused buffers and pagecache. */
0079 void invalidate_bdev(struct block_device *bdev)
0080 {
0081     struct address_space *mapping = bdev->bd_inode->i_mapping;
0082 
0083     if (mapping->nrpages) {
0084         invalidate_bh_lrus();
0085         lru_add_drain_all();    /* make sure all lru add caches are flushed */
0086         invalidate_mapping_pages(mapping, 0, -1);
0087     }
0088 }
0089 EXPORT_SYMBOL(invalidate_bdev);
0090 
0091 /*
0092  * Drop all buffers & page cache for given bdev range. This function bails
0093  * with error if bdev has other exclusive owner (such as filesystem).
0094  */
0095 int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
0096             loff_t lstart, loff_t lend)
0097 {
0098     /*
0099      * If we don't hold an exclusive handle for the device, upgrade to
0100      * one while we discard the buffer cache to avoid discarding buffers
0101      * under a live filesystem.
0102      */
0103     if (!(mode & FMODE_EXCL)) {
0104         int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
0105         if (err)
0106             goto invalidate;
0107     }
0108 
0109     truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
0110     if (!(mode & FMODE_EXCL))
0111         bd_abort_claiming(bdev, truncate_bdev_range);
0112     return 0;
0113 
0114 invalidate:
0115     /*
0116      * Someone else holds the device exclusively open. Try invalidating instead.
0117      * The 'end' argument is inclusive so the rounding is safe.
0118      */
0119     return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
0120                          lstart >> PAGE_SHIFT,
0121                          lend >> PAGE_SHIFT);
0122 }
0123 
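/*
 * Illustrative caller-side sketch (not part of bdev.c): a discard-style
 * ioctl drops the page cache over the byte range it is about to invalidate
 * before issuing the command to the device.  example_discard_range() is
 * hypothetical; the real callers live in block/ioctl.c.
 */
static int example_discard_range(struct block_device *bdev, fmode_t mode,
				 loff_t start, loff_t len)
{
	int err;

	/* the end argument is inclusive, hence the -1 */
	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
	if (err)
		return err;
	/* ... issue the discard/zeroout request to the device here ... */
	return 0;
}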
0124 static void set_init_blocksize(struct block_device *bdev)
0125 {
0126     unsigned int bsize = bdev_logical_block_size(bdev);
0127     loff_t size = i_size_read(bdev->bd_inode);
0128 
0129     while (bsize < PAGE_SIZE) {
0130         if (size & bsize)
0131             break;
0132         bsize <<= 1;
0133     }
0134     bdev->bd_inode->i_blkbits = blksize_bits(bsize);
0135 }
0136 
0137 int set_blocksize(struct block_device *bdev, int size)
0138 {
0139     /* Size must be a power of two, and between 512 and PAGE_SIZE */
0140     if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
0141         return -EINVAL;
0142 
0143     /* Size cannot be smaller than the size supported by the device */
0144     if (size < bdev_logical_block_size(bdev))
0145         return -EINVAL;
0146 
0147     /* Don't change the size if it is same as current */
0148     if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
0149         sync_blockdev(bdev);
0150         bdev->bd_inode->i_blkbits = blksize_bits(size);
0151         kill_bdev(bdev);
0152     }
0153     return 0;
0154 }
0155 
0156 EXPORT_SYMBOL(set_blocksize);
0157 
0158 int sb_set_blocksize(struct super_block *sb, int size)
0159 {
0160     if (set_blocksize(sb->s_bdev, size))
0161         return 0;
0162     /* If we get here, we know size is a power of two
0163      * and its value is between 512 and PAGE_SIZE */
0164     sb->s_blocksize = size;
0165     sb->s_blocksize_bits = blksize_bits(size);
0166     return sb->s_blocksize;
0167 }
0168 
0169 EXPORT_SYMBOL(sb_set_blocksize);
0170 
0171 int sb_min_blocksize(struct super_block *sb, int size)
0172 {
0173     int minsize = bdev_logical_block_size(sb->s_bdev);
0174     if (size < minsize)
0175         size = minsize;
0176     return sb_set_blocksize(sb, size);
0177 }
0178 
0179 EXPORT_SYMBOL(sb_min_blocksize);
0180 
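/*
 * Illustrative sketch (hypothetical example_fill_super(), not part of any
 * real filesystem): a fill_super implementation typically picks its block
 * size via sb_min_blocksize() or sb_set_blocksize() before reading its
 * on-disk superblock.
 */
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	/* ask for 1024 bytes, but never less than the logical block size */
	if (!sb_min_blocksize(sb, 1024))
		return -EINVAL;	/* not a power of two in [512, PAGE_SIZE] */

	/* sb->s_blocksize and sb->s_blocksize_bits are now valid */
	return 0;
}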
0181 int sync_blockdev_nowait(struct block_device *bdev)
0182 {
0183     if (!bdev)
0184         return 0;
0185     return filemap_flush(bdev->bd_inode->i_mapping);
0186 }
0187 EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
0188 
0189 /*
0190  * Write out and wait upon all the dirty data associated with a block
0191  * device via its mapping.  Does not take the superblock lock.
0192  */
0193 int sync_blockdev(struct block_device *bdev)
0194 {
0195     if (!bdev)
0196         return 0;
0197     return filemap_write_and_wait(bdev->bd_inode->i_mapping);
0198 }
0199 EXPORT_SYMBOL(sync_blockdev);
0200 
0201 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
0202 {
0203     return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
0204             lstart, lend);
0205 }
0206 EXPORT_SYMBOL(sync_blockdev_range);
0207 
0208 /*
0209  * Write out and wait upon all dirty data associated with this
0210  * device: filesystem data as well as the underlying block
0211  * device.  Takes the superblock lock.
0212  */
0213 int fsync_bdev(struct block_device *bdev)
0214 {
0215     struct super_block *sb = get_super(bdev);
0216     if (sb) {
0217         int res = sync_filesystem(sb);
0218         drop_super(sb);
0219         return res;
0220     }
0221     return sync_blockdev(bdev);
0222 }
0223 EXPORT_SYMBOL(fsync_bdev);
0224 
0225 /**
0226  * freeze_bdev  --  lock a filesystem and force it into a consistent state
0227  * @bdev:   blockdevice to lock
0228  *
0229  * If a superblock is found on this device, we take the s_umount semaphore
0230  * on it to make sure nobody unmounts until the snapshot creation is done.
0231  * The reference counter (bd_fsfreeze_count) guarantees that only the last
0232  * unfreeze process actually unfreezes the frozen filesystem when multiple
0233  * freeze requests arrive simultaneously. It is incremented in freeze_bdev()
0234  * and decremented in thaw_bdev(). When it reaches 0, thaw_bdev() actually
0235  * unfreezes the filesystem.
0236  */
0237 int freeze_bdev(struct block_device *bdev)
0238 {
0239     struct super_block *sb;
0240     int error = 0;
0241 
0242     mutex_lock(&bdev->bd_fsfreeze_mutex);
0243     if (++bdev->bd_fsfreeze_count > 1)
0244         goto done;
0245 
0246     sb = get_active_super(bdev);
0247     if (!sb)
0248         goto sync;
0249     if (sb->s_op->freeze_super)
0250         error = sb->s_op->freeze_super(sb);
0251     else
0252         error = freeze_super(sb);
0253     deactivate_super(sb);
0254 
0255     if (error) {
0256         bdev->bd_fsfreeze_count--;
0257         goto done;
0258     }
0259     bdev->bd_fsfreeze_sb = sb;
0260 
0261 sync:
0262     sync_blockdev(bdev);
0263 done:
0264     mutex_unlock(&bdev->bd_fsfreeze_mutex);
0265     return error;
0266 }
0267 EXPORT_SYMBOL(freeze_bdev);
0268 
0269 /**
0270  * thaw_bdev  -- unlock filesystem
0271  * @bdev:   blockdevice to unlock
0272  *
0273  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
0274  */
0275 int thaw_bdev(struct block_device *bdev)
0276 {
0277     struct super_block *sb;
0278     int error = -EINVAL;
0279 
0280     mutex_lock(&bdev->bd_fsfreeze_mutex);
0281     if (!bdev->bd_fsfreeze_count)
0282         goto out;
0283 
0284     error = 0;
0285     if (--bdev->bd_fsfreeze_count > 0)
0286         goto out;
0287 
0288     sb = bdev->bd_fsfreeze_sb;
0289     if (!sb)
0290         goto out;
0291 
0292     if (sb->s_op->thaw_super)
0293         error = sb->s_op->thaw_super(sb);
0294     else
0295         error = thaw_super(sb);
0296     if (error)
0297         bdev->bd_fsfreeze_count++;
0298     else
0299         bdev->bd_fsfreeze_sb = NULL;
0300 out:
0301     mutex_unlock(&bdev->bd_fsfreeze_mutex);
0302     return error;
0303 }
0304 EXPORT_SYMBOL(thaw_bdev);
0305 
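/*
 * Illustrative sketch of the freeze/thaw pairing (hypothetical
 * example_with_frozen_bdev(); real users include dm and the loop driver):
 * every successful freeze_bdev() must be balanced by a thaw_bdev(), and
 * only the final thaw actually unfreezes the filesystem.
 */
static int example_with_frozen_bdev(struct block_device *bdev)
{
	int error = freeze_bdev(bdev);		/* bumps bd_fsfreeze_count */

	if (error)
		return error;
	/* ... take a consistent snapshot of the device here ... */
	return thaw_bdev(bdev);			/* last thaw really unfreezes */
}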
0306 /**
0307  * bdev_read_page() - Start reading a page from a block device
0308  * @bdev: The device to read the page from
0309  * @sector: The offset on the device to read the page from (need not be aligned)
0310  * @page: The page to read
0311  *
0312  * On entry, the page should be locked.  It will be unlocked when the page
0313  * has been read.  If the block driver implements rw_page synchronously,
0314  * that will be true on exit from this function, but it need not be.
0315  *
0316  * Errors returned by this function are usually "soft", e.g. out of memory, or
0317  * queue full; callers should try a different route to read this page rather
0318  * than propagate an error back up the stack.
0319  *
0320  * Return: negative errno if an error occurs, 0 if submission was successful.
0321  */
0322 int bdev_read_page(struct block_device *bdev, sector_t sector,
0323             struct page *page)
0324 {
0325     const struct block_device_operations *ops = bdev->bd_disk->fops;
0326     int result = -EOPNOTSUPP;
0327 
0328     if (!ops->rw_page || bdev_get_integrity(bdev))
0329         return result;
0330 
0331     result = blk_queue_enter(bdev_get_queue(bdev), 0);
0332     if (result)
0333         return result;
0334     result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
0335                   REQ_OP_READ);
0336     blk_queue_exit(bdev_get_queue(bdev));
0337     return result;
0338 }
0339 
0340 /**
0341  * bdev_write_page() - Start writing a page to a block device
0342  * @bdev: The device to write the page to
0343  * @sector: The offset on the device to write the page to (need not be aligned)
0344  * @page: The page to write
0345  * @wbc: The writeback_control for the write
0346  *
0347  * On entry, the page should be locked and not currently under writeback.
0348  * On exit, if the write started successfully, the page will be unlocked and
0349  * under writeback.  If the write failed already (e.g. the driver failed to
0350  * queue the page to the device), the page will still be locked.  If the
0351  * caller is a ->writepage implementation, it will need to unlock the page.
0352  *
0353  * Errors returned by this function are usually "soft", e.g. out of memory, or
0354  * queue full; callers should try a different route to write this page rather
0355  * than propagate an error back up the stack.
0356  *
0357  * Return: negative errno if an error occurs, 0 if submission was successful.
0358  */
0359 int bdev_write_page(struct block_device *bdev, sector_t sector,
0360             struct page *page, struct writeback_control *wbc)
0361 {
0362     int result;
0363     const struct block_device_operations *ops = bdev->bd_disk->fops;
0364 
0365     if (!ops->rw_page || bdev_get_integrity(bdev))
0366         return -EOPNOTSUPP;
0367     result = blk_queue_enter(bdev_get_queue(bdev), 0);
0368     if (result)
0369         return result;
0370 
0371     set_page_writeback(page);
0372     result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
0373                   REQ_OP_WRITE);
0374     if (result) {
0375         end_page_writeback(page);
0376     } else {
0377         clean_page_buffers(page);
0378         unlock_page(page);
0379     }
0380     blk_queue_exit(bdev_get_queue(bdev));
0381     return result;
0382 }
0383 
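/*
 * Illustrative caller-side sketch: because the errors above are "soft",
 * callers such as the swap and mpage code try the rw_page path first and
 * fall back to an ordinary bio submission when it is unavailable.
 * example_readpage() is hypothetical.
 */
static void example_readpage(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	if (bdev_read_page(bdev, sector, page) == 0)
		return;	/* submitted; the page is unlocked on completion */

	/* ... otherwise build and submit a struct bio for this page ... */
}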
0384 /*
0385  * pseudo-fs
0386  */
0387 
0388 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
0389 static struct kmem_cache * bdev_cachep __read_mostly;
0390 
0391 static struct inode *bdev_alloc_inode(struct super_block *sb)
0392 {
0393     struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);
0394 
0395     if (!ei)
0396         return NULL;
0397     memset(&ei->bdev, 0, sizeof(ei->bdev));
0398     return &ei->vfs_inode;
0399 }
0400 
0401 static void bdev_free_inode(struct inode *inode)
0402 {
0403     struct block_device *bdev = I_BDEV(inode);
0404 
0405     free_percpu(bdev->bd_stats);
0406     kfree(bdev->bd_meta_info);
0407 
0408     if (!bdev_is_partition(bdev)) {
0409         if (bdev->bd_disk && bdev->bd_disk->bdi)
0410             bdi_put(bdev->bd_disk->bdi);
0411         kfree(bdev->bd_disk);
0412     }
0413 
0414     if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
0415         blk_free_ext_minor(MINOR(bdev->bd_dev));
0416 
0417     kmem_cache_free(bdev_cachep, BDEV_I(inode));
0418 }
0419 
0420 static void init_once(void *data)
0421 {
0422     struct bdev_inode *ei = data;
0423 
0424     inode_init_once(&ei->vfs_inode);
0425 }
0426 
0427 static void bdev_evict_inode(struct inode *inode)
0428 {
0429     truncate_inode_pages_final(&inode->i_data);
0430     invalidate_inode_buffers(inode); /* is it needed here? */
0431     clear_inode(inode);
0432 }
0433 
0434 static const struct super_operations bdev_sops = {
0435     .statfs = simple_statfs,
0436     .alloc_inode = bdev_alloc_inode,
0437     .free_inode = bdev_free_inode,
0438     .drop_inode = generic_delete_inode,
0439     .evict_inode = bdev_evict_inode,
0440 };
0441 
0442 static int bd_init_fs_context(struct fs_context *fc)
0443 {
0444     struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
0445     if (!ctx)
0446         return -ENOMEM;
0447     fc->s_iflags |= SB_I_CGROUPWB;
0448     ctx->ops = &bdev_sops;
0449     return 0;
0450 }
0451 
0452 static struct file_system_type bd_type = {
0453     .name       = "bdev",
0454     .init_fs_context = bd_init_fs_context,
0455     .kill_sb    = kill_anon_super,
0456 };
0457 
0458 struct super_block *blockdev_superblock __read_mostly;
0459 EXPORT_SYMBOL_GPL(blockdev_superblock);
0460 
0461 void __init bdev_cache_init(void)
0462 {
0463     int err;
0464     static struct vfsmount *bd_mnt;
0465 
0466     bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
0467             0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
0468                 SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
0469             init_once);
0470     err = register_filesystem(&bd_type);
0471     if (err)
0472         panic("Cannot register bdev pseudo-fs");
0473     bd_mnt = kern_mount(&bd_type);
0474     if (IS_ERR(bd_mnt))
0475         panic("Cannot create bdev pseudo-fs");
0476     blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
0477 }
0478 
0479 struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
0480 {
0481     struct block_device *bdev;
0482     struct inode *inode;
0483 
0484     inode = new_inode(blockdev_superblock);
0485     if (!inode)
0486         return NULL;
0487     inode->i_mode = S_IFBLK;
0488     inode->i_rdev = 0;
0489     inode->i_data.a_ops = &def_blk_aops;
0490     mapping_set_gfp_mask(&inode->i_data, GFP_USER);
0491 
0492     bdev = I_BDEV(inode);
0493     mutex_init(&bdev->bd_fsfreeze_mutex);
0494     spin_lock_init(&bdev->bd_size_lock);
0495     bdev->bd_partno = partno;
0496     bdev->bd_inode = inode;
0497     bdev->bd_queue = disk->queue;
0498     bdev->bd_stats = alloc_percpu(struct disk_stats);
0499     if (!bdev->bd_stats) {
0500         iput(inode);
0501         return NULL;
0502     }
0503     bdev->bd_disk = disk;
0504     return bdev;
0505 }
0506 
0507 void bdev_add(struct block_device *bdev, dev_t dev)
0508 {
0509     bdev->bd_dev = dev;
0510     bdev->bd_inode->i_rdev = dev;
0511     bdev->bd_inode->i_ino = dev;
0512     insert_inode_hash(bdev->bd_inode);
0513 }
0514 
0515 long nr_blockdev_pages(void)
0516 {
0517     struct inode *inode;
0518     long ret = 0;
0519 
0520     spin_lock(&blockdev_superblock->s_inode_list_lock);
0521     list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
0522         ret += inode->i_mapping->nrpages;
0523     spin_unlock(&blockdev_superblock->s_inode_list_lock);
0524 
0525     return ret;
0526 }
0527 
0528 /**
0529  * bd_may_claim - test whether a block device can be claimed
0530  * @bdev: block device of interest
0531  * @whole: whole block device containing @bdev, may equal @bdev
0532  * @holder: holder trying to claim @bdev
0533  *
0534  * Test whether @bdev can be claimed by @holder.
0535  *
0536  * CONTEXT:
0537  * spin_lock(&bdev_lock).
0538  *
0539  * RETURNS:
0540  * %true if @bdev can be claimed, %false otherwise.
0541  */
0542 static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
0543              void *holder)
0544 {
0545     if (bdev->bd_holder == holder)
0546         return true;     /* already a holder */
0547     else if (bdev->bd_holder != NULL)
0548         return false;    /* held by someone else */
0549     else if (whole == bdev)
0550         return true;     /* is a whole device which isn't held */
0551 
0552     else if (whole->bd_holder == bd_may_claim)
0553         return true;     /* is a partition of a device that is being partitioned */
0554     else if (whole->bd_holder != NULL)
0555         return false;    /* is a partition of a held device */
0556     else
0557         return true;     /* is a partition of an un-held device */
0558 }
0559 
0560 /**
0561  * bd_prepare_to_claim - claim a block device
0562  * @bdev: block device of interest
0563  * @holder: holder trying to claim @bdev
0564  *
0565  * Claim @bdev.  This function fails if @bdev is already claimed by another
0566  * holder and waits if another claiming is in progress. On successful return,
0567  * the caller has ownership of bd_claiming and bd_holder[s].
0568  *
0569  * RETURNS:
0570  * 0 if @bdev can be claimed, -EBUSY otherwise.
0571  */
0572 int bd_prepare_to_claim(struct block_device *bdev, void *holder)
0573 {
0574     struct block_device *whole = bdev_whole(bdev);
0575 
0576     if (WARN_ON_ONCE(!holder))
0577         return -EINVAL;
0578 retry:
0579     spin_lock(&bdev_lock);
0580     /* if someone else claimed, fail */
0581     if (!bd_may_claim(bdev, whole, holder)) {
0582         spin_unlock(&bdev_lock);
0583         return -EBUSY;
0584     }
0585 
0586     /* if claiming is already in progress, wait for it to finish */
0587     if (whole->bd_claiming) {
0588         wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
0589         DEFINE_WAIT(wait);
0590 
0591         prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
0592         spin_unlock(&bdev_lock);
0593         schedule();
0594         finish_wait(wq, &wait);
0595         goto retry;
0596     }
0597 
0598     /* yay, all mine */
0599     whole->bd_claiming = holder;
0600     spin_unlock(&bdev_lock);
0601     return 0;
0602 }
0603 EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
0604 
0605 static void bd_clear_claiming(struct block_device *whole, void *holder)
0606 {
0607     lockdep_assert_held(&bdev_lock);
0608     /* tell others that we're done */
0609     BUG_ON(whole->bd_claiming != holder);
0610     whole->bd_claiming = NULL;
0611     wake_up_bit(&whole->bd_claiming, 0);
0612 }
0613 
0614 /**
0615  * bd_finish_claiming - finish claiming of a block device
0616  * @bdev: block device of interest
0617  * @holder: holder that has claimed @bdev
0618  *
0619  * Finish exclusive open of a block device. Mark the device as exclusively
0620  * open by the holder and wake up all waiters for exclusive open to finish.
0621  */
0622 static void bd_finish_claiming(struct block_device *bdev, void *holder)
0623 {
0624     struct block_device *whole = bdev_whole(bdev);
0625 
0626     spin_lock(&bdev_lock);
0627     BUG_ON(!bd_may_claim(bdev, whole, holder));
0628     /*
0629      * Note that for a whole device bd_holders will be incremented twice,
0630      * and bd_holder will be set to bd_may_claim before being set to holder
0631      */
0632     whole->bd_holders++;
0633     whole->bd_holder = bd_may_claim;
0634     bdev->bd_holders++;
0635     bdev->bd_holder = holder;
0636     bd_clear_claiming(whole, holder);
0637     spin_unlock(&bdev_lock);
0638 }
0639 
0640 /**
0641  * bd_abort_claiming - abort claiming of a block device
0642  * @bdev: block device of interest
0643  * @holder: holder that has claimed @bdev
0644  *
0645  * Abort claiming of a block device when the exclusive open failed. This can
0646  * also be used when exclusive open is not actually desired and we just needed
0647  * to block other exclusive openers for a while.
0648  */
0649 void bd_abort_claiming(struct block_device *bdev, void *holder)
0650 {
0651     spin_lock(&bdev_lock);
0652     bd_clear_claiming(bdev_whole(bdev), holder);
0653     spin_unlock(&bdev_lock);
0654 }
0655 EXPORT_SYMBOL(bd_abort_claiming);
0656 
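/*
 * Illustrative sketch of the temporary-claim pattern used by
 * truncate_bdev_range() above: block other exclusive openers around a short
 * critical section without performing a full exclusive open.
 * example_exclusive_section() is hypothetical.
 */
static int example_exclusive_section(struct block_device *bdev, void *holder)
{
	int err = bd_prepare_to_claim(bdev, holder);

	if (err)
		return err;	/* device is exclusively held by someone else */
	/* ... short critical section that must exclude other claimants ... */
	bd_abort_claiming(bdev, holder);
	return 0;
}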
0657 static void blkdev_flush_mapping(struct block_device *bdev)
0658 {
0659     WARN_ON_ONCE(bdev->bd_holders);
0660     sync_blockdev(bdev);
0661     kill_bdev(bdev);
0662     bdev_write_inode(bdev);
0663 }
0664 
0665 static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
0666 {
0667     struct gendisk *disk = bdev->bd_disk;
0668     int ret;
0669 
0670     if (disk->fops->open) {
0671         ret = disk->fops->open(bdev, mode);
0672         if (ret) {
0673             /* avoid ghost partitions on a removed medium */
0674             if (ret == -ENOMEDIUM &&
0675                  test_bit(GD_NEED_PART_SCAN, &disk->state))
0676                 bdev_disk_changed(disk, true);
0677             return ret;
0678         }
0679     }
0680 
0681     if (!atomic_read(&bdev->bd_openers))
0682         set_init_blocksize(bdev);
0683     if (test_bit(GD_NEED_PART_SCAN, &disk->state))
0684         bdev_disk_changed(disk, false);
0685     atomic_inc(&bdev->bd_openers);
0686     return 0;
0687 }
0688 
0689 static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
0690 {
0691     if (atomic_dec_and_test(&bdev->bd_openers))
0692         blkdev_flush_mapping(bdev);
0693     if (bdev->bd_disk->fops->release)
0694         bdev->bd_disk->fops->release(bdev->bd_disk, mode);
0695 }
0696 
0697 static int blkdev_get_part(struct block_device *part, fmode_t mode)
0698 {
0699     struct gendisk *disk = part->bd_disk;
0700     int ret;
0701 
0702     if (atomic_read(&part->bd_openers))
0703         goto done;
0704 
0705     ret = blkdev_get_whole(bdev_whole(part), mode);
0706     if (ret)
0707         return ret;
0708 
0709     ret = -ENXIO;
0710     if (!bdev_nr_sectors(part))
0711         goto out_blkdev_put;
0712 
0713     disk->open_partitions++;
0714     set_init_blocksize(part);
0715 done:
0716     atomic_inc(&part->bd_openers);
0717     return 0;
0718 
0719 out_blkdev_put:
0720     blkdev_put_whole(bdev_whole(part), mode);
0721     return ret;
0722 }
0723 
0724 static void blkdev_put_part(struct block_device *part, fmode_t mode)
0725 {
0726     struct block_device *whole = bdev_whole(part);
0727 
0728     if (!atomic_dec_and_test(&part->bd_openers))
0729         return;
0730     blkdev_flush_mapping(part);
0731     whole->bd_disk->open_partitions--;
0732     blkdev_put_whole(whole, mode);
0733 }
0734 
0735 struct block_device *blkdev_get_no_open(dev_t dev)
0736 {
0737     struct block_device *bdev;
0738     struct inode *inode;
0739 
0740     inode = ilookup(blockdev_superblock, dev);
0741     if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
0742         blk_request_module(dev);
0743         inode = ilookup(blockdev_superblock, dev);
0744         if (inode)
0745             pr_warn_ratelimited(
0746 "block device autoloading is deprecated and will be removed.\n");
0747     }
0748     if (!inode)
0749         return NULL;
0750 
0751     /* switch from the inode reference to a device model one: */
0752     bdev = &BDEV_I(inode)->bdev;
0753     if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
0754         bdev = NULL;
0755     iput(inode);
0756     return bdev;
0757 }
0758 
0759 void blkdev_put_no_open(struct block_device *bdev)
0760 {
0761     put_device(&bdev->bd_device);
0762 }
0763 
0764 /**
0765  * blkdev_get_by_dev - open a block device by device number
0766  * @dev: device number of block device to open
0767  * @mode: FMODE_* mask
0768  * @holder: exclusive holder identifier
0769  *
0770  * Open the block device described by device number @dev. If @mode includes
0771  * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
0772  * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
0773  * the same @holder.
0774  *
0775  * Use this interface ONLY if you really do not have anything better - i.e. when
0776  * you are behind a truly sucky interface and all you are given is a device
0777  * number.  Everything else should use blkdev_get_by_path().
0778  *
0779  * CONTEXT:
0780  * Might sleep.
0781  *
0782  * RETURNS:
0783  * Reference to the block_device on success, ERR_PTR(-errno) on failure.
0784  */
0785 struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
0786 {
0787     bool unblock_events = true;
0788     struct block_device *bdev;
0789     struct gendisk *disk;
0790     int ret;
0791 
0792     ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
0793             MAJOR(dev), MINOR(dev),
0794             ((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
0795             ((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
0796     if (ret)
0797         return ERR_PTR(ret);
0798 
0799     bdev = blkdev_get_no_open(dev);
0800     if (!bdev)
0801         return ERR_PTR(-ENXIO);
0802     disk = bdev->bd_disk;
0803 
0804     if (mode & FMODE_EXCL) {
0805         ret = bd_prepare_to_claim(bdev, holder);
0806         if (ret)
0807             goto put_blkdev;
0808     }
0809 
0810     disk_block_events(disk);
0811 
0812     mutex_lock(&disk->open_mutex);
0813     ret = -ENXIO;
0814     if (!disk_live(disk))
0815         goto abort_claiming;
0816     if (!try_module_get(disk->fops->owner))
0817         goto abort_claiming;
0818     if (bdev_is_partition(bdev))
0819         ret = blkdev_get_part(bdev, mode);
0820     else
0821         ret = blkdev_get_whole(bdev, mode);
0822     if (ret)
0823         goto put_module;
0824     if (mode & FMODE_EXCL) {
0825         bd_finish_claiming(bdev, holder);
0826 
0827         /*
0828          * Block event polling for write claims if requested.  Any write
0829          * holder makes the write_holder state stick until all are
0830          * released.  This is good enough and tracking individual
0831          * writeable reference is too fragile given the way @mode is
0832          * used in blkdev_get/put().
0833          */
0834         if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
0835             (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
0836             bdev->bd_write_holder = true;
0837             unblock_events = false;
0838         }
0839     }
0840     mutex_unlock(&disk->open_mutex);
0841 
0842     if (unblock_events)
0843         disk_unblock_events(disk);
0844     return bdev;
0845 put_module:
0846     module_put(disk->fops->owner);
0847 abort_claiming:
0848     if (mode & FMODE_EXCL)
0849         bd_abort_claiming(bdev, holder);
0850     mutex_unlock(&disk->open_mutex);
0851     disk_unblock_events(disk);
0852 put_blkdev:
0853     blkdev_put_no_open(bdev);
0854     return ERR_PTR(ret);
0855 }
0856 EXPORT_SYMBOL(blkdev_get_by_dev);
0857 
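/*
 * Illustrative sketch (hypothetical example_probe_dev()): exclusive open by
 * device number, paired with a blkdev_put() that passes the same mode.
 */
static int example_probe_dev(dev_t dev, void *holder)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_EXCL, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	pr_info("%pg: %llu sectors\n", bdev,
		(unsigned long long)bdev_nr_sectors(bdev));

	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
	return 0;
}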
0858 /**
0859  * blkdev_get_by_path - open a block device by name
0860  * @path: path to the block device to open
0861  * @mode: FMODE_* mask
0862  * @holder: exclusive holder identifier
0863  *
0864  * Open the block device described by the device file at @path.  If @mode
0865  * includes %FMODE_EXCL, the block device is opened with exclusive access.
0866  * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may
0867  * nest for the same @holder.
0868  *
0869  * CONTEXT:
0870  * Might sleep.
0871  *
0872  * RETURNS:
0873  * Reference to the block_device on success, ERR_PTR(-errno) on failure.
0874  */
0875 struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
0876                     void *holder)
0877 {
0878     struct block_device *bdev;
0879     dev_t dev;
0880     int error;
0881 
0882     error = lookup_bdev(path, &dev);
0883     if (error)
0884         return ERR_PTR(error);
0885 
0886     bdev = blkdev_get_by_dev(dev, mode, holder);
0887     if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
0888         blkdev_put(bdev, mode);
0889         return ERR_PTR(-EACCES);
0890     }
0891 
0892     return bdev;
0893 }
0894 EXPORT_SYMBOL(blkdev_get_by_path);
0895 
0896 void blkdev_put(struct block_device *bdev, fmode_t mode)
0897 {
0898     struct gendisk *disk = bdev->bd_disk;
0899 
0900     /*
0901      * Sync early if it looks like we're the last one.  If someone else
0902      * opens the block device between now and the decrement of bd_openers
0903      * then we did a sync that we didn't need to, but that's not the end
0904      * of the world and we want to avoid long (could be several minute)
0905      * syncs while holding the mutex.
0906      */
0907     if (atomic_read(&bdev->bd_openers) == 1)
0908         sync_blockdev(bdev);
0909 
0910     mutex_lock(&disk->open_mutex);
0911     if (mode & FMODE_EXCL) {
0912         struct block_device *whole = bdev_whole(bdev);
0913         bool bdev_free;
0914 
0915         /*
0916          * Release a claim on the device.  The holder fields
0917          * are protected with bdev_lock.  open_mutex is to
0918          * synchronize disk_holder unlinking.
0919          */
0920         spin_lock(&bdev_lock);
0921 
0922         WARN_ON_ONCE(--bdev->bd_holders < 0);
0923         WARN_ON_ONCE(--whole->bd_holders < 0);
0924 
0925         if ((bdev_free = !bdev->bd_holders))
0926             bdev->bd_holder = NULL;
0927         if (!whole->bd_holders)
0928             whole->bd_holder = NULL;
0929 
0930         spin_unlock(&bdev_lock);
0931 
0932         /*
0933          * If this was the last claim, remove holder link and
0934      * unblock event polling if it was a write holder.
0935          */
0936         if (bdev_free && bdev->bd_write_holder) {
0937             disk_unblock_events(disk);
0938             bdev->bd_write_holder = false;
0939         }
0940     }
0941 
0942     /*
0943      * Trigger event checking and tell drivers to flush MEDIA_CHANGE
0944      * event.  This is to ensure detection of media removal commanded
0945      * from userland - e.g. eject(1).
0946      */
0947     disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);
0948 
0949     if (bdev_is_partition(bdev))
0950         blkdev_put_part(bdev, mode);
0951     else
0952         blkdev_put_whole(bdev, mode);
0953     mutex_unlock(&disk->open_mutex);
0954 
0955     module_put(disk->fops->owner);
0956     blkdev_put_no_open(bdev);
0957 }
0958 EXPORT_SYMBOL(blkdev_put);
0959 
0960 /**
0961  * lookup_bdev() - Look up a struct block_device by name.
0962  * @pathname: Name of the block device in the filesystem.
0963  * @dev: Pointer to the block device's dev_t, if found.
0964  *
0965  * Lookup the block device's dev_t at @pathname in the current
0966  * namespace if possible and return it in @dev.
0967  *
0968  * Context: May sleep.
0969  * Return: 0 if succeeded, negative errno otherwise.
0970  */
0971 int lookup_bdev(const char *pathname, dev_t *dev)
0972 {
0973     struct inode *inode;
0974     struct path path;
0975     int error;
0976 
0977     if (!pathname || !*pathname)
0978         return -EINVAL;
0979 
0980     error = kern_path(pathname, LOOKUP_FOLLOW, &path);
0981     if (error)
0982         return error;
0983 
0984     inode = d_backing_inode(path.dentry);
0985     error = -ENOTBLK;
0986     if (!S_ISBLK(inode->i_mode))
0987         goto out_path_put;
0988     error = -EACCES;
0989     if (!may_open_dev(&path))
0990         goto out_path_put;
0991 
0992     *dev = inode->i_rdev;
0993     error = 0;
0994 out_path_put:
0995     path_put(&path);
0996     return error;
0997 }
0998 EXPORT_SYMBOL(lookup_bdev);
0999 
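/*
 * Illustrative sketch: resolving a device path to its dev_t.  The
 * "/dev/vda1" path and example_resolve() are hypothetical.
 */
static void example_resolve(void)
{
	dev_t dev;

	if (!lookup_bdev("/dev/vda1", &dev))
		pr_info("resolved to %u:%u\n", MAJOR(dev), MINOR(dev));
}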
1000 int __invalidate_device(struct block_device *bdev, bool kill_dirty)
1001 {
1002     struct super_block *sb = get_super(bdev);
1003     int res = 0;
1004 
1005     if (sb) {
1006         /*
1007          * no need to lock the super, get_super holds the
1008          * read mutex so the filesystem cannot go away
1009          * under us (->put_super runs with the write lock
1010          * held).
1011          */
1012         shrink_dcache_sb(sb);
1013         res = invalidate_inodes(sb, kill_dirty);
1014         drop_super(sb);
1015     }
1016     invalidate_bdev(bdev);
1017     return res;
1018 }
1019 EXPORT_SYMBOL(__invalidate_device);
1020 
1021 void sync_bdevs(bool wait)
1022 {
1023     struct inode *inode, *old_inode = NULL;
1024 
1025     spin_lock(&blockdev_superblock->s_inode_list_lock);
1026     list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
1027         struct address_space *mapping = inode->i_mapping;
1028         struct block_device *bdev;
1029 
1030         spin_lock(&inode->i_lock);
1031         if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
1032             mapping->nrpages == 0) {
1033             spin_unlock(&inode->i_lock);
1034             continue;
1035         }
1036         __iget(inode);
1037         spin_unlock(&inode->i_lock);
1038         spin_unlock(&blockdev_superblock->s_inode_list_lock);
1039         /*
1040          * We hold a reference to 'inode' so it couldn't have been
1041          * removed from the s_inodes list while we dropped the
1042          * s_inode_list_lock. We cannot iput the inode now as we can
1043          * be holding the last reference and we cannot iput it under
1044          * s_inode_list_lock. So we keep the reference and iput it
1045          * later.
1046          */
1047         iput(old_inode);
1048         old_inode = inode;
1049         bdev = I_BDEV(inode);
1050 
1051         mutex_lock(&bdev->bd_disk->open_mutex);
1052         if (!atomic_read(&bdev->bd_openers)) {
1053             ; /* skip */
1054         } else if (wait) {
1055             /*
1056              * We keep the error status of individual mapping so
1057              * that applications can catch the writeback error using
1058              * fsync(2). See filemap_fdatawait_keep_errors() for
1059              * details.
1060              */
1061             filemap_fdatawait_keep_errors(inode->i_mapping);
1062         } else {
1063             filemap_fdatawrite(inode->i_mapping);
1064         }
1065         mutex_unlock(&bdev->bd_disk->open_mutex);
1066 
1067         spin_lock(&blockdev_superblock->s_inode_list_lock);
1068     }
1069     spin_unlock(&blockdev_superblock->s_inode_list_lock);
1070     iput(old_inode);
1071 }