Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (c) 2014 Christoph Hellwig.
0004  */
0005 #include "xfs.h"
0006 #include "xfs_shared.h"
0007 #include "xfs_format.h"
0008 #include "xfs_log_format.h"
0009 #include "xfs_trans_resv.h"
0010 #include "xfs_mount.h"
0011 #include "xfs_inode.h"
0012 #include "xfs_trans.h"
0013 #include "xfs_bmap.h"
0014 #include "xfs_iomap.h"
0015 #include "xfs_pnfs.h"
0016 
0017 /*
0018  * Ensure that we do not have any outstanding pNFS layouts that can be used by
0019  * clients to directly read from or write to this inode.  This must be called
0020  * before every operation that can remove blocks from the extent map.
0021  * Additionally we call it during the write operation, where aren't concerned
0022  * about exposing unallocated blocks but just want to provide basic
0023  * synchronization between a local writer and pNFS clients.  mmap writes would
0024  * also benefit from this sort of synchronization, but due to the tricky locking
0025  * rules in the page fault path we don't bother.
0026  */
0027 int
0028 xfs_break_leased_layouts(
0029     struct inode        *inode,
0030     uint            *iolock,
0031     bool            *did_unlock)
0032 {
0033     struct xfs_inode    *ip = XFS_I(inode);
0034     int         error;
0035 
0036     while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
0037         xfs_iunlock(ip, *iolock);
0038         *did_unlock = true;
0039         error = break_layout(inode, true);
0040         *iolock &= ~XFS_IOLOCK_SHARED;
0041         *iolock |= XFS_IOLOCK_EXCL;
0042         xfs_ilock(ip, *iolock);
0043     }
0044 
0045     return error;
0046 }
0047 
0048 /*
0049  * Get a unique ID including its location so that the client can identify
0050  * the exported device.
0051  */
0052 int
0053 xfs_fs_get_uuid(
0054     struct super_block  *sb,
0055     u8          *buf,
0056     u32         *len,
0057     u64         *offset)
0058 {
0059     struct xfs_mount    *mp = XFS_M(sb);
0060 
0061     xfs_notice_once(mp,
0062 "Using experimental pNFS feature, use at your own risk!");
0063 
0064     if (*len < sizeof(uuid_t))
0065         return -EINVAL;
0066 
0067     memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
0068     *len = sizeof(uuid_t);
0069     *offset = offsetof(struct xfs_dsb, sb_uuid);
0070     return 0;
0071 }
0072 
0073 /*
0074  * We cannot use file based VFS helpers such as file_modified() to update
0075  * inode state as we modify the data/metadata in the inode here. Hence we have
0076  * to open code the timestamp updates and SUID/SGID stripping. We also need
0077  * to set the inode prealloc flag to ensure that the extents we allocate are not
0078  * removed if the inode is reclaimed from memory before xfs_fs_block_commit()
0079  * is from the client to indicate that data has been written and the file size
0080  * can be extended.
0081  */
0082 static int
0083 xfs_fs_map_update_inode(
0084     struct xfs_inode    *ip)
0085 {
0086     struct xfs_trans    *tp;
0087     int         error;
0088 
0089     error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
0090             0, 0, 0, &tp);
0091     if (error)
0092         return error;
0093 
0094     xfs_ilock(ip, XFS_ILOCK_EXCL);
0095     xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
0096 
0097     VFS_I(ip)->i_mode &= ~S_ISUID;
0098     if (VFS_I(ip)->i_mode & S_IXGRP)
0099         VFS_I(ip)->i_mode &= ~S_ISGID;
0100     xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
0101     ip->i_diflags |= XFS_DIFLAG_PREALLOC;
0102 
0103     xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
0104     return xfs_trans_commit(tp);
0105 }
0106 
/*
 * Get a layout for the pNFS client.
 *
 * Map the byte range [offset, offset + length) of @inode into @iomap for the
 * client; for a @write layout over a hole, allocate real blocks first.
 * *device_generation is set from the mount's m_generation so the client can
 * detect device reconfiguration.
 *
 * Returns 0 on success or a negative errno (-EIO on shutdown, -ENXIO for
 * inodes that cannot be exported, -EINVAL for out-of-range offsets).
 */
int
xfs_fs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	u64			length,
	struct iomap		*iomap,
	bool			write,
	u32			*device_generation)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	loff_t			limit;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;
	uint			lock_flags;
	int			error = 0;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/*
	 * We can't export inodes residing on the realtime device.  The realtime
	 * device doesn't have a UUID to identify it, so the client has no way
	 * to find it.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		return -ENXIO;

	/*
	 * The pNFS block layout spec actually supports reflink like
	 * functionality, but the Linux pNFS server doesn't implement it yet.
	 */
	if (xfs_is_reflink_inode(ip))
		return -ENXIO;

	/*
	 * Lock out any other I/O before we flush and invalidate the pagecache,
	 * and then hand out a layout to the remote system.  This is very
	 * similar to direct I/O, except that the synchronization is much more
	 * complicated.  See the comment near xfs_break_leased_layouts
	 * for a detailed explanation.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	/*
	 * Reject requests beyond the supported limit; for read layouts also
	 * allow up to the block-aligned file size.  Clamp the length so the
	 * mapped range never extends past the limit.
	 */
	error = -EINVAL;
	limit = mp->m_super->s_maxbytes;
	if (!write)
		limit = max(limit, round_up(i_size_read(inode),
				     inode->i_sb->s_blocksize));
	if (offset > limit)
		goto out_unlock;
	if (offset > limit - length)
		length = limit - offset;

	/* Push dirty pagecache to disk and drop it before mapping blocks. */
	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (WARN_ON_ONCE(error))
		goto out_unlock;

	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	lock_flags = xfs_ilock_data_map_shared(ip);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, bmapi_flags);

	/* Delalloc extents should have been written back above. */
	ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);

	/*
	 * NOTE(review): if xfs_bmapi_read() fails we take the else branch
	 * below and still call xfs_bmbt_to_iomap() on an imap that was never
	 * filled in, and its return value overwrites the read error —
	 * confirm this is intentional.
	 */
	if (!error && write &&
	    (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) {
		/*
		 * Write layout over a hole: allocate real blocks now so the
		 * client can write into them directly.
		 */
		if (offset + length > XFS_ISIZE(ip))
			end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
		else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
			end_fsb = min(end_fsb, imap.br_startoff +
				           imap.br_blockcount);
		xfs_iunlock(ip, lock_flags);

		error = xfs_iomap_write_direct(ip, offset_fsb,
				end_fsb - offset_fsb, 0, &imap);
		if (error)
			goto out_unlock;

		/*
		 * Ensure the next transaction is committed synchronously so
		 * that the blocks allocated and handed out to the client are
		 * guaranteed to be present even after a server crash.
		 */
		error = xfs_fs_map_update_inode(ip);
		if (!error)
			error = xfs_log_force_inode(ip);
		if (error)
			goto out_unlock;

	} else {
		xfs_iunlock(ip, lock_flags);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0, 0);
	*device_generation = mp->m_generation;
	return error;
out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
0219 
0220 /*
0221  * Ensure the size update falls into a valid allocated block.
0222  */
0223 static int
0224 xfs_pnfs_validate_isize(
0225     struct xfs_inode    *ip,
0226     xfs_off_t       isize)
0227 {
0228     struct xfs_bmbt_irec    imap;
0229     int         nimaps = 1;
0230     int         error = 0;
0231 
0232     xfs_ilock(ip, XFS_ILOCK_SHARED);
0233     error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
0234                 &imap, &nimaps, 0);
0235     xfs_iunlock(ip, XFS_ILOCK_SHARED);
0236     if (error)
0237         return error;
0238 
0239     if (imap.br_startblock == HOLESTARTBLOCK ||
0240         imap.br_startblock == DELAYSTARTBLOCK ||
0241         imap.br_state == XFS_EXT_UNWRITTEN)
0242         return -EIO;
0243     return 0;
0244 }
0245 
/*
 * Make sure the blocks described by maps are stable on disk.  This includes
 * converting any unwritten extents, flushing the disk cache and updating the
 * time stamps.
 *
 * Note that we rely on the caller to always send us a timestamp update so that
 * we always commit a transaction here.  If that stops being true we will have
 * to manually flush the cache here similar to what the fsync code path does
 * for datasyncs on files that have no dirty metadata.
 */
int
xfs_fs_commit_blocks(
	struct inode		*inode,
	struct iomap		*maps,		/* ranges written by the client */
	int			nr_maps,	/* number of entries in maps */
	struct iattr		*iattr)		/* timestamp (and size) update */
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	bool			update_isize = false;
	int			error, i;
	loff_t			size;

	/* The caller must always send a timestamp update (see above). */
	ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	/* An extending size update widens the range we need to stabilize. */
	size = i_size_read(inode);
	if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
		update_isize = true;
		size = iattr->ia_size;
	}

	for (i = 0; i < nr_maps; i++) {
		u64 start, length, end;

		/* Clamp each map to [0, size); skip anything fully beyond. */
		start = maps[i].offset;
		if (start > size)
			continue;

		end = start + maps[i].length;
		if (end > size)
			end = size;

		length = end - start;
		if (!length)
			continue;

		/*
		 * Make sure reads through the pagecache see the new data.
		 */
		error = invalidate_inode_pages2_range(inode->i_mapping,
					start >> PAGE_SHIFT,
					(end - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(error);

		/* Convert unwritten extents so reads return the new data. */
		error = xfs_iomap_write_unwritten(ip, start, length, false);
		if (error)
			goto out_drop_iolock;
	}

	if (update_isize) {
		/* The new EOF must land in a real, written block. */
		error = xfs_pnfs_validate_isize(ip, size);
		if (error)
			goto out_drop_iolock;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_drop_iolock;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/* Apply the timestamp (and possibly size) updates from the client. */
	ASSERT(!(iattr->ia_valid & (ATTR_UID | ATTR_GID)));
	setattr_copy(&init_user_ns, inode, iattr);
	if (update_isize) {
		i_size_write(inode, iattr->ia_size);
		ip->i_disk_size = iattr->ia_size;
	}

	/* Synchronous commit so the update is stable before we reply. */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);

out_drop_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}