Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
0004  * All Rights Reserved.
0005  */
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_shared.h"
0009 #include "xfs_format.h"
0010 #include "xfs_log_format.h"
0011 #include "xfs_trans_resv.h"
0012 #include "xfs_mount.h"
0013 #include "xfs_inode.h"
0014 #include "xfs_rtalloc.h"
0015 #include "xfs_iwalk.h"
0016 #include "xfs_itable.h"
0017 #include "xfs_error.h"
0018 #include "xfs_da_format.h"
0019 #include "xfs_da_btree.h"
0020 #include "xfs_attr.h"
0021 #include "xfs_bmap.h"
0022 #include "xfs_bmap_util.h"
0023 #include "xfs_fsops.h"
0024 #include "xfs_discard.h"
0025 #include "xfs_quota.h"
0026 #include "xfs_export.h"
0027 #include "xfs_trace.h"
0028 #include "xfs_icache.h"
0029 #include "xfs_trans.h"
0030 #include "xfs_acl.h"
0031 #include "xfs_btree.h"
0032 #include <linux/fsmap.h>
0033 #include "xfs_fsmap.h"
0034 #include "scrub/xfs_scrub.h"
0035 #include "xfs_sb.h"
0036 #include "xfs_ag.h"
0037 #include "xfs_health.h"
0038 #include "xfs_reflink.h"
0039 #include "xfs_ioctl.h"
0040 #include "xfs_xattr.h"
0041 
0042 #include <linux/mount.h>
0043 #include <linux/namei.h>
0044 #include <linux/fileattr.h>
0045 
0046 /*
0047  * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
0048  * a file or fs handle.
0049  *
0050  * XFS_IOC_PATH_TO_FSHANDLE
0051  *    returns fs handle for a mount point or path within that mount point
0052  * XFS_IOC_FD_TO_HANDLE
0053  *    returns full handle for a FD opened in user space
0054  * XFS_IOC_PATH_TO_HANDLE
0055  *    returns full handle for a path
0056  */
0057 int
0058 xfs_find_handle(
0059     unsigned int        cmd,
0060     xfs_fsop_handlereq_t    *hreq)
0061 {
0062     int         hsize;
0063     xfs_handle_t        handle;
0064     struct inode        *inode;
0065     struct fd       f = {NULL};
0066     struct path     path;
0067     int         error;
0068     struct xfs_inode    *ip;
0069 
0070     if (cmd == XFS_IOC_FD_TO_HANDLE) {
0071         f = fdget(hreq->fd);
0072         if (!f.file)
0073             return -EBADF;
0074         inode = file_inode(f.file);
0075     } else {
0076         error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
0077         if (error)
0078             return error;
0079         inode = d_inode(path.dentry);
0080     }
0081     ip = XFS_I(inode);
0082 
0083     /*
0084      * We can only generate handles for inodes residing on a XFS filesystem,
0085      * and only for regular files, directories or symbolic links.
0086      */
0087     error = -EINVAL;
0088     if (inode->i_sb->s_magic != XFS_SB_MAGIC)
0089         goto out_put;
0090 
0091     error = -EBADF;
0092     if (!S_ISREG(inode->i_mode) &&
0093         !S_ISDIR(inode->i_mode) &&
0094         !S_ISLNK(inode->i_mode))
0095         goto out_put;
0096 
0097 
0098     memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));
0099 
0100     if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
0101         /*
0102          * This handle only contains an fsid, zero the rest.
0103          */
0104         memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
0105         hsize = sizeof(xfs_fsid_t);
0106     } else {
0107         handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
0108                     sizeof(handle.ha_fid.fid_len);
0109         handle.ha_fid.fid_pad = 0;
0110         handle.ha_fid.fid_gen = inode->i_generation;
0111         handle.ha_fid.fid_ino = ip->i_ino;
0112         hsize = sizeof(xfs_handle_t);
0113     }
0114 
0115     error = -EFAULT;
0116     if (copy_to_user(hreq->ohandle, &handle, hsize) ||
0117         copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
0118         goto out_put;
0119 
0120     error = 0;
0121 
0122  out_put:
0123     if (cmd == XFS_IOC_FD_TO_HANDLE)
0124         fdput(f);
0125     else
0126         path_put(&path);
0127     return error;
0128 }
0129 
0130 /*
0131  * No need to do permission checks on the various pathname components
0132  * as the handle operations are privileged.
0133  */
0134 STATIC int
0135 xfs_handle_acceptable(
0136     void            *context,
0137     struct dentry       *dentry)
0138 {
0139     return 1;
0140 }
0141 
0142 /*
0143  * Convert userspace handle data into a dentry.
0144  */
0145 struct dentry *
0146 xfs_handle_to_dentry(
0147     struct file     *parfilp,
0148     void __user     *uhandle,
0149     u32         hlen)
0150 {
0151     xfs_handle_t        handle;
0152     struct xfs_fid64    fid;
0153 
0154     /*
0155      * Only allow handle opens under a directory.
0156      */
0157     if (!S_ISDIR(file_inode(parfilp)->i_mode))
0158         return ERR_PTR(-ENOTDIR);
0159 
0160     if (hlen != sizeof(xfs_handle_t))
0161         return ERR_PTR(-EINVAL);
0162     if (copy_from_user(&handle, uhandle, hlen))
0163         return ERR_PTR(-EFAULT);
0164     if (handle.ha_fid.fid_len !=
0165         sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
0166         return ERR_PTR(-EINVAL);
0167 
0168     memset(&fid, 0, sizeof(struct fid));
0169     fid.ino = handle.ha_fid.fid_ino;
0170     fid.gen = handle.ha_fid.fid_gen;
0171 
0172     return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
0173             FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
0174             xfs_handle_acceptable, NULL);
0175 }
0176 
0177 STATIC struct dentry *
0178 xfs_handlereq_to_dentry(
0179     struct file     *parfilp,
0180     xfs_fsop_handlereq_t    *hreq)
0181 {
0182     return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
0183 }
0184 
0185 int
0186 xfs_open_by_handle(
0187     struct file     *parfilp,
0188     xfs_fsop_handlereq_t    *hreq)
0189 {
0190     const struct cred   *cred = current_cred();
0191     int         error;
0192     int         fd;
0193     int         permflag;
0194     struct file     *filp;
0195     struct inode        *inode;
0196     struct dentry       *dentry;
0197     fmode_t         fmode;
0198     struct path     path;
0199 
0200     if (!capable(CAP_SYS_ADMIN))
0201         return -EPERM;
0202 
0203     dentry = xfs_handlereq_to_dentry(parfilp, hreq);
0204     if (IS_ERR(dentry))
0205         return PTR_ERR(dentry);
0206     inode = d_inode(dentry);
0207 
0208     /* Restrict xfs_open_by_handle to directories & regular files. */
0209     if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
0210         error = -EPERM;
0211         goto out_dput;
0212     }
0213 
0214 #if BITS_PER_LONG != 32
0215     hreq->oflags |= O_LARGEFILE;
0216 #endif
0217 
0218     permflag = hreq->oflags;
0219     fmode = OPEN_FMODE(permflag);
0220     if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
0221         (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
0222         error = -EPERM;
0223         goto out_dput;
0224     }
0225 
0226     if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
0227         error = -EPERM;
0228         goto out_dput;
0229     }
0230 
0231     /* Can't write directories. */
0232     if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
0233         error = -EISDIR;
0234         goto out_dput;
0235     }
0236 
0237     fd = get_unused_fd_flags(0);
0238     if (fd < 0) {
0239         error = fd;
0240         goto out_dput;
0241     }
0242 
0243     path.mnt = parfilp->f_path.mnt;
0244     path.dentry = dentry;
0245     filp = dentry_open(&path, hreq->oflags, cred);
0246     dput(dentry);
0247     if (IS_ERR(filp)) {
0248         put_unused_fd(fd);
0249         return PTR_ERR(filp);
0250     }
0251 
0252     if (S_ISREG(inode->i_mode)) {
0253         filp->f_flags |= O_NOATIME;
0254         filp->f_mode |= FMODE_NOCMTIME;
0255     }
0256 
0257     fd_install(fd, filp);
0258     return fd;
0259 
0260  out_dput:
0261     dput(dentry);
0262     return error;
0263 }
0264 
0265 int
0266 xfs_readlink_by_handle(
0267     struct file     *parfilp,
0268     xfs_fsop_handlereq_t    *hreq)
0269 {
0270     struct dentry       *dentry;
0271     __u32           olen;
0272     int         error;
0273 
0274     if (!capable(CAP_SYS_ADMIN))
0275         return -EPERM;
0276 
0277     dentry = xfs_handlereq_to_dentry(parfilp, hreq);
0278     if (IS_ERR(dentry))
0279         return PTR_ERR(dentry);
0280 
0281     /* Restrict this handle operation to symlinks only. */
0282     if (!d_is_symlink(dentry)) {
0283         error = -EINVAL;
0284         goto out_dput;
0285     }
0286 
0287     if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
0288         error = -EFAULT;
0289         goto out_dput;
0290     }
0291 
0292     error = vfs_readlink(dentry, hreq->ohandle, olen);
0293 
0294  out_dput:
0295     dput(dentry);
0296     return error;
0297 }
0298 
0299 /*
0300  * Format an attribute and copy it out to the user's buffer.
0301  * Take care to check values and protect against them changing later,
0302  * we may be reading them directly out of a user buffer.
0303  */
0304 static void
0305 xfs_ioc_attr_put_listent(
0306     struct xfs_attr_list_context *context,
0307     int         flags,
0308     unsigned char       *name,
0309     int         namelen,
0310     int         valuelen)
0311 {
0312     struct xfs_attrlist *alist = context->buffer;
0313     struct xfs_attrlist_ent *aep;
0314     int         arraytop;
0315 
0316     ASSERT(!context->seen_enough);
0317     ASSERT(context->count >= 0);
0318     ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
0319     ASSERT(context->firstu >= sizeof(*alist));
0320     ASSERT(context->firstu <= context->bufsize);
0321 
0322     /*
0323      * Only list entries in the right namespace.
0324      */
0325     if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
0326         return;
0327 
0328     arraytop = sizeof(*alist) +
0329             context->count * sizeof(alist->al_offset[0]);
0330 
0331     /* decrement by the actual bytes used by the attr */
0332     context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
0333             namelen + 1, sizeof(uint32_t));
0334     if (context->firstu < arraytop) {
0335         trace_xfs_attr_list_full(context);
0336         alist->al_more = 1;
0337         context->seen_enough = 1;
0338         return;
0339     }
0340 
0341     aep = context->buffer + context->firstu;
0342     aep->a_valuelen = valuelen;
0343     memcpy(aep->a_name, name, namelen);
0344     aep->a_name[namelen] = 0;
0345     alist->al_offset[context->count++] = context->firstu;
0346     alist->al_count = context->count;
0347     trace_xfs_attr_list_add(context);
0348 }
0349 
0350 static unsigned int
0351 xfs_attr_filter(
0352     u32         ioc_flags)
0353 {
0354     if (ioc_flags & XFS_IOC_ATTR_ROOT)
0355         return XFS_ATTR_ROOT;
0356     if (ioc_flags & XFS_IOC_ATTR_SECURE)
0357         return XFS_ATTR_SECURE;
0358     return 0;
0359 }
0360 
0361 static unsigned int
0362 xfs_attr_flags(
0363     u32         ioc_flags)
0364 {
0365     if (ioc_flags & XFS_IOC_ATTR_CREATE)
0366         return XATTR_CREATE;
0367     if (ioc_flags & XFS_IOC_ATTR_REPLACE)
0368         return XATTR_REPLACE;
0369     return 0;
0370 }
0371 
0372 int
0373 xfs_ioc_attr_list(
0374     struct xfs_inode        *dp,
0375     void __user         *ubuf,
0376     size_t              bufsize,
0377     int             flags,
0378     struct xfs_attrlist_cursor __user *ucursor)
0379 {
0380     struct xfs_attr_list_context    context = { };
0381     struct xfs_attrlist     *alist;
0382     void                *buffer;
0383     int             error;
0384 
0385     if (bufsize < sizeof(struct xfs_attrlist) ||
0386         bufsize > XFS_XATTR_LIST_MAX)
0387         return -EINVAL;
0388 
0389     /*
0390      * Reject flags, only allow namespaces.
0391      */
0392     if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
0393         return -EINVAL;
0394     if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
0395         return -EINVAL;
0396 
0397     /*
0398      * Validate the cursor.
0399      */
0400     if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
0401         return -EFAULT;
0402     if (context.cursor.pad1 || context.cursor.pad2)
0403         return -EINVAL;
0404     if (!context.cursor.initted &&
0405         (context.cursor.hashval || context.cursor.blkno ||
0406          context.cursor.offset))
0407         return -EINVAL;
0408 
0409     buffer = kvzalloc(bufsize, GFP_KERNEL);
0410     if (!buffer)
0411         return -ENOMEM;
0412 
0413     /*
0414      * Initialize the output buffer.
0415      */
0416     context.dp = dp;
0417     context.resynch = 1;
0418     context.attr_filter = xfs_attr_filter(flags);
0419     context.buffer = buffer;
0420     context.bufsize = round_down(bufsize, sizeof(uint32_t));
0421     context.firstu = context.bufsize;
0422     context.put_listent = xfs_ioc_attr_put_listent;
0423 
0424     alist = context.buffer;
0425     alist->al_count = 0;
0426     alist->al_more = 0;
0427     alist->al_offset[0] = context.bufsize;
0428 
0429     error = xfs_attr_list(&context);
0430     if (error)
0431         goto out_free;
0432 
0433     if (copy_to_user(ubuf, buffer, bufsize) ||
0434         copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
0435         error = -EFAULT;
0436 out_free:
0437     kmem_free(buffer);
0438     return error;
0439 }
0440 
0441 STATIC int
0442 xfs_attrlist_by_handle(
0443     struct file     *parfilp,
0444     struct xfs_fsop_attrlist_handlereq __user *p)
0445 {
0446     struct xfs_fsop_attrlist_handlereq al_hreq;
0447     struct dentry       *dentry;
0448     int         error = -ENOMEM;
0449 
0450     if (!capable(CAP_SYS_ADMIN))
0451         return -EPERM;
0452     if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
0453         return -EFAULT;
0454 
0455     dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
0456     if (IS_ERR(dentry))
0457         return PTR_ERR(dentry);
0458 
0459     error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
0460                   al_hreq.buflen, al_hreq.flags, &p->pos);
0461     dput(dentry);
0462     return error;
0463 }
0464 
0465 static int
0466 xfs_attrmulti_attr_get(
0467     struct inode        *inode,
0468     unsigned char       *name,
0469     unsigned char       __user *ubuf,
0470     uint32_t        *len,
0471     uint32_t        flags)
0472 {
0473     struct xfs_da_args  args = {
0474         .dp     = XFS_I(inode),
0475         .attr_filter    = xfs_attr_filter(flags),
0476         .attr_flags = xfs_attr_flags(flags),
0477         .name       = name,
0478         .namelen    = strlen(name),
0479         .valuelen   = *len,
0480     };
0481     int         error;
0482 
0483     if (*len > XFS_XATTR_SIZE_MAX)
0484         return -EINVAL;
0485 
0486     error = xfs_attr_get(&args);
0487     if (error)
0488         goto out_kfree;
0489 
0490     *len = args.valuelen;
0491     if (copy_to_user(ubuf, args.value, args.valuelen))
0492         error = -EFAULT;
0493 
0494 out_kfree:
0495     kmem_free(args.value);
0496     return error;
0497 }
0498 
0499 static int
0500 xfs_attrmulti_attr_set(
0501     struct inode        *inode,
0502     unsigned char       *name,
0503     const unsigned char __user *ubuf,
0504     uint32_t        len,
0505     uint32_t        flags)
0506 {
0507     struct xfs_da_args  args = {
0508         .dp     = XFS_I(inode),
0509         .attr_filter    = xfs_attr_filter(flags),
0510         .attr_flags = xfs_attr_flags(flags),
0511         .name       = name,
0512         .namelen    = strlen(name),
0513     };
0514     int         error;
0515 
0516     if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
0517         return -EPERM;
0518 
0519     if (ubuf) {
0520         if (len > XFS_XATTR_SIZE_MAX)
0521             return -EINVAL;
0522         args.value = memdup_user(ubuf, len);
0523         if (IS_ERR(args.value))
0524             return PTR_ERR(args.value);
0525         args.valuelen = len;
0526     }
0527 
0528     error = xfs_attr_change(&args);
0529     if (!error && (flags & XFS_IOC_ATTR_ROOT))
0530         xfs_forget_acl(inode, name);
0531     kfree(args.value);
0532     return error;
0533 }
0534 
0535 int
0536 xfs_ioc_attrmulti_one(
0537     struct file     *parfilp,
0538     struct inode        *inode,
0539     uint32_t        opcode,
0540     void __user     *uname,
0541     void __user     *value,
0542     uint32_t        *len,
0543     uint32_t        flags)
0544 {
0545     unsigned char       *name;
0546     int         error;
0547 
0548     if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
0549         return -EINVAL;
0550 
0551     name = strndup_user(uname, MAXNAMELEN);
0552     if (IS_ERR(name))
0553         return PTR_ERR(name);
0554 
0555     switch (opcode) {
0556     case ATTR_OP_GET:
0557         error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
0558         break;
0559     case ATTR_OP_REMOVE:
0560         value = NULL;
0561         *len = 0;
0562         fallthrough;
0563     case ATTR_OP_SET:
0564         error = mnt_want_write_file(parfilp);
0565         if (error)
0566             break;
0567         error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
0568         mnt_drop_write_file(parfilp);
0569         break;
0570     default:
0571         error = -EINVAL;
0572         break;
0573     }
0574 
0575     kfree(name);
0576     return error;
0577 }
0578 
0579 STATIC int
0580 xfs_attrmulti_by_handle(
0581     struct file     *parfilp,
0582     void            __user *arg)
0583 {
0584     int         error;
0585     xfs_attr_multiop_t  *ops;
0586     xfs_fsop_attrmulti_handlereq_t am_hreq;
0587     struct dentry       *dentry;
0588     unsigned int        i, size;
0589 
0590     if (!capable(CAP_SYS_ADMIN))
0591         return -EPERM;
0592     if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
0593         return -EFAULT;
0594 
0595     /* overflow check */
0596     if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
0597         return -E2BIG;
0598 
0599     dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
0600     if (IS_ERR(dentry))
0601         return PTR_ERR(dentry);
0602 
0603     error = -E2BIG;
0604     size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
0605     if (!size || size > 16 * PAGE_SIZE)
0606         goto out_dput;
0607 
0608     ops = memdup_user(am_hreq.ops, size);
0609     if (IS_ERR(ops)) {
0610         error = PTR_ERR(ops);
0611         goto out_dput;
0612     }
0613 
0614     error = 0;
0615     for (i = 0; i < am_hreq.opcount; i++) {
0616         ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
0617                 d_inode(dentry), ops[i].am_opcode,
0618                 ops[i].am_attrname, ops[i].am_attrvalue,
0619                 &ops[i].am_length, ops[i].am_flags);
0620     }
0621 
0622     if (copy_to_user(am_hreq.ops, ops, size))
0623         error = -EFAULT;
0624 
0625     kfree(ops);
0626  out_dput:
0627     dput(dentry);
0628     return error;
0629 }
0630 
0631 /* Return 0 on success or positive error */
0632 int
0633 xfs_fsbulkstat_one_fmt(
0634     struct xfs_ibulk        *breq,
0635     const struct xfs_bulkstat   *bstat)
0636 {
0637     struct xfs_bstat        bs1;
0638 
0639     xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
0640     if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
0641         return -EFAULT;
0642     return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
0643 }
0644 
0645 int
0646 xfs_fsinumbers_fmt(
0647     struct xfs_ibulk        *breq,
0648     const struct xfs_inumbers   *igrp)
0649 {
0650     struct xfs_inogrp       ig1;
0651 
0652     xfs_inumbers_to_inogrp(&ig1, igrp);
0653     if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
0654         return -EFAULT;
0655     return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
0656 }
0657 
0658 STATIC int
0659 xfs_ioc_fsbulkstat(
0660     struct file     *file,
0661     unsigned int        cmd,
0662     void            __user *arg)
0663 {
0664     struct xfs_mount    *mp = XFS_I(file_inode(file))->i_mount;
0665     struct xfs_fsop_bulkreq bulkreq;
0666     struct xfs_ibulk    breq = {
0667         .mp     = mp,
0668         .mnt_userns = file_mnt_user_ns(file),
0669         .ocount     = 0,
0670     };
0671     xfs_ino_t       lastino;
0672     int         error;
0673 
0674     /* done = 1 if there are more stats to get and if bulkstat */
0675     /* should be called again (unused here, but used in dmapi) */
0676 
0677     if (!capable(CAP_SYS_ADMIN))
0678         return -EPERM;
0679 
0680     if (xfs_is_shutdown(mp))
0681         return -EIO;
0682 
0683     if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
0684         return -EFAULT;
0685 
0686     if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
0687         return -EFAULT;
0688 
0689     if (bulkreq.icount <= 0)
0690         return -EINVAL;
0691 
0692     if (bulkreq.ubuffer == NULL)
0693         return -EINVAL;
0694 
0695     breq.ubuffer = bulkreq.ubuffer;
0696     breq.icount = bulkreq.icount;
0697 
0698     /*
0699      * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
0700      * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
0701      * that *lastip contains either zero or the number of the last inode to
0702      * be examined by the previous call and return results starting with
0703      * the next inode after that.  The new bulk request back end functions
0704      * take the inode to start with, so we have to compute the startino
0705      * parameter from lastino to maintain correct function.  lastino == 0
0706      * is a special case because it has traditionally meant "first inode
0707      * in filesystem".
0708      */
0709     if (cmd == XFS_IOC_FSINUMBERS) {
0710         breq.startino = lastino ? lastino + 1 : 0;
0711         error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
0712         lastino = breq.startino - 1;
0713     } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
0714         breq.startino = lastino;
0715         breq.icount = 1;
0716         error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
0717     } else {    /* XFS_IOC_FSBULKSTAT */
0718         breq.startino = lastino ? lastino + 1 : 0;
0719         error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
0720         lastino = breq.startino - 1;
0721     }
0722 
0723     if (error)
0724         return error;
0725 
0726     if (bulkreq.lastip != NULL &&
0727         copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
0728         return -EFAULT;
0729 
0730     if (bulkreq.ocount != NULL &&
0731         copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
0732         return -EFAULT;
0733 
0734     return 0;
0735 }
0736 
0737 /* Return 0 on success or positive error */
0738 static int
0739 xfs_bulkstat_fmt(
0740     struct xfs_ibulk        *breq,
0741     const struct xfs_bulkstat   *bstat)
0742 {
0743     if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
0744         return -EFAULT;
0745     return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
0746 }
0747 
0748 /*
0749  * Check the incoming bulk request @hdr from userspace and initialize the
0750  * internal @breq bulk request appropriately.  Returns 0 if the bulk request
0751  * should proceed; -ECANCELED if there's nothing to do; or the usual
0752  * negative error code.
0753  */
0754 static int
0755 xfs_bulk_ireq_setup(
0756     struct xfs_mount    *mp,
0757     struct xfs_bulk_ireq    *hdr,
0758     struct xfs_ibulk    *breq,
0759     void __user     *ubuffer)
0760 {
0761     if (hdr->icount == 0 ||
0762         (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
0763         memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
0764         return -EINVAL;
0765 
0766     breq->startino = hdr->ino;
0767     breq->ubuffer = ubuffer;
0768     breq->icount = hdr->icount;
0769     breq->ocount = 0;
0770     breq->flags = 0;
0771 
0772     /*
0773      * The @ino parameter is a special value, so we must look it up here.
0774      * We're not allowed to have IREQ_AGNO, and we only return one inode
0775      * worth of data.
0776      */
0777     if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
0778         if (hdr->flags & XFS_BULK_IREQ_AGNO)
0779             return -EINVAL;
0780 
0781         switch (hdr->ino) {
0782         case XFS_BULK_IREQ_SPECIAL_ROOT:
0783             hdr->ino = mp->m_sb.sb_rootino;
0784             break;
0785         default:
0786             return -EINVAL;
0787         }
0788         breq->icount = 1;
0789     }
0790 
0791     /*
0792      * The IREQ_AGNO flag means that we only want results from a given AG.
0793      * If @hdr->ino is zero, we start iterating in that AG.  If @hdr->ino is
0794      * beyond the specified AG then we return no results.
0795      */
0796     if (hdr->flags & XFS_BULK_IREQ_AGNO) {
0797         if (hdr->agno >= mp->m_sb.sb_agcount)
0798             return -EINVAL;
0799 
0800         if (breq->startino == 0)
0801             breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
0802         else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
0803             return -EINVAL;
0804 
0805         breq->flags |= XFS_IBULK_SAME_AG;
0806 
0807         /* Asking for an inode past the end of the AG?  We're done! */
0808         if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
0809             return -ECANCELED;
0810     } else if (hdr->agno)
0811         return -EINVAL;
0812 
0813     /* Asking for an inode past the end of the FS?  We're done! */
0814     if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
0815         return -ECANCELED;
0816 
0817     if (hdr->flags & XFS_BULK_IREQ_NREXT64)
0818         breq->flags |= XFS_IBULK_NREXT64;
0819 
0820     return 0;
0821 }
0822 
0823 /*
0824  * Update the userspace bulk request @hdr to reflect the end state of the
0825  * internal bulk request @breq.
0826  */
0827 static void
0828 xfs_bulk_ireq_teardown(
0829     struct xfs_bulk_ireq    *hdr,
0830     struct xfs_ibulk    *breq)
0831 {
0832     hdr->ino = breq->startino;
0833     hdr->ocount = breq->ocount;
0834 }
0835 
0836 /* Handle the v5 bulkstat ioctl. */
0837 STATIC int
0838 xfs_ioc_bulkstat(
0839     struct file         *file,
0840     unsigned int            cmd,
0841     struct xfs_bulkstat_req __user  *arg)
0842 {
0843     struct xfs_mount        *mp = XFS_I(file_inode(file))->i_mount;
0844     struct xfs_bulk_ireq        hdr;
0845     struct xfs_ibulk        breq = {
0846         .mp         = mp,
0847         .mnt_userns     = file_mnt_user_ns(file),
0848     };
0849     int             error;
0850 
0851     if (!capable(CAP_SYS_ADMIN))
0852         return -EPERM;
0853 
0854     if (xfs_is_shutdown(mp))
0855         return -EIO;
0856 
0857     if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
0858         return -EFAULT;
0859 
0860     error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
0861     if (error == -ECANCELED)
0862         goto out_teardown;
0863     if (error < 0)
0864         return error;
0865 
0866     error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
0867     if (error)
0868         return error;
0869 
0870 out_teardown:
0871     xfs_bulk_ireq_teardown(&hdr, &breq);
0872     if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
0873         return -EFAULT;
0874 
0875     return 0;
0876 }
0877 
0878 STATIC int
0879 xfs_inumbers_fmt(
0880     struct xfs_ibulk        *breq,
0881     const struct xfs_inumbers   *igrp)
0882 {
0883     if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
0884         return -EFAULT;
0885     return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
0886 }
0887 
0888 /* Handle the v5 inumbers ioctl. */
0889 STATIC int
0890 xfs_ioc_inumbers(
0891     struct xfs_mount        *mp,
0892     unsigned int            cmd,
0893     struct xfs_inumbers_req __user  *arg)
0894 {
0895     struct xfs_bulk_ireq        hdr;
0896     struct xfs_ibulk        breq = {
0897         .mp         = mp,
0898     };
0899     int             error;
0900 
0901     if (!capable(CAP_SYS_ADMIN))
0902         return -EPERM;
0903 
0904     if (xfs_is_shutdown(mp))
0905         return -EIO;
0906 
0907     if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
0908         return -EFAULT;
0909 
0910     error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
0911     if (error == -ECANCELED)
0912         goto out_teardown;
0913     if (error < 0)
0914         return error;
0915 
0916     error = xfs_inumbers(&breq, xfs_inumbers_fmt);
0917     if (error)
0918         return error;
0919 
0920 out_teardown:
0921     xfs_bulk_ireq_teardown(&hdr, &breq);
0922     if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
0923         return -EFAULT;
0924 
0925     return 0;
0926 }
0927 
0928 STATIC int
0929 xfs_ioc_fsgeometry(
0930     struct xfs_mount    *mp,
0931     void            __user *arg,
0932     int         struct_version)
0933 {
0934     struct xfs_fsop_geom    fsgeo;
0935     size_t          len;
0936 
0937     xfs_fs_geometry(mp, &fsgeo, struct_version);
0938 
0939     if (struct_version <= 3)
0940         len = sizeof(struct xfs_fsop_geom_v1);
0941     else if (struct_version == 4)
0942         len = sizeof(struct xfs_fsop_geom_v4);
0943     else {
0944         xfs_fsop_geom_health(mp, &fsgeo);
0945         len = sizeof(fsgeo);
0946     }
0947 
0948     if (copy_to_user(arg, &fsgeo, len))
0949         return -EFAULT;
0950     return 0;
0951 }
0952 
0953 STATIC int
0954 xfs_ioc_ag_geometry(
0955     struct xfs_mount    *mp,
0956     void            __user *arg)
0957 {
0958     struct xfs_perag    *pag;
0959     struct xfs_ag_geometry  ageo;
0960     int         error;
0961 
0962     if (copy_from_user(&ageo, arg, sizeof(ageo)))
0963         return -EFAULT;
0964     if (ageo.ag_flags)
0965         return -EINVAL;
0966     if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
0967         return -EINVAL;
0968 
0969     pag = xfs_perag_get(mp, ageo.ag_number);
0970     if (!pag)
0971         return -EINVAL;
0972 
0973     error = xfs_ag_get_geometry(pag, &ageo);
0974     xfs_perag_put(pag);
0975     if (error)
0976         return error;
0977 
0978     if (copy_to_user(arg, &ageo, sizeof(ageo)))
0979         return -EFAULT;
0980     return 0;
0981 }
0982 
0983 /*
0984  * Linux extended inode flags interface.
0985  */
0986 
0987 static void
0988 xfs_fill_fsxattr(
0989     struct xfs_inode    *ip,
0990     int         whichfork,
0991     struct fileattr     *fa)
0992 {
0993     struct xfs_mount    *mp = ip->i_mount;
0994     struct xfs_ifork    *ifp = xfs_ifork_ptr(ip, whichfork);
0995 
0996     fileattr_fill_xflags(fa, xfs_ip2xflags(ip));
0997 
0998     if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
0999         fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
1000     } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
1001         /*
1002          * Don't let a misaligned extent size hint on a directory
1003          * escape to userspace if it won't pass the setattr checks
1004          * later.
1005          */
1006         if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
1007             ip->i_extsize % mp->m_sb.sb_rextsize > 0) {
1008             fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
1009                         FS_XFLAG_EXTSZINHERIT);
1010             fa->fsx_extsize = 0;
1011         } else {
1012             fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
1013         }
1014     }
1015 
1016     if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
1017         fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
1018     fa->fsx_projid = ip->i_projid;
1019     if (ifp && !xfs_need_iread_extents(ifp))
1020         fa->fsx_nextents = xfs_iext_count(ifp);
1021     else
1022         fa->fsx_nextents = xfs_ifork_nextents(ifp);
1023 }
1024 
1025 STATIC int
1026 xfs_ioc_fsgetxattra(
1027     xfs_inode_t     *ip,
1028     void            __user *arg)
1029 {
1030     struct fileattr     fa;
1031 
1032     xfs_ilock(ip, XFS_ILOCK_SHARED);
1033     xfs_fill_fsxattr(ip, XFS_ATTR_FORK, &fa);
1034     xfs_iunlock(ip, XFS_ILOCK_SHARED);
1035 
1036     return copy_fsxattr_to_user(&fa, arg);
1037 }
1038 
1039 int
1040 xfs_fileattr_get(
1041     struct dentry       *dentry,
1042     struct fileattr     *fa)
1043 {
1044     struct xfs_inode    *ip = XFS_I(d_inode(dentry));
1045 
1046     if (d_is_special(dentry))
1047         return -ENOTTY;
1048 
1049     xfs_ilock(ip, XFS_ILOCK_SHARED);
1050     xfs_fill_fsxattr(ip, XFS_DATA_FORK, fa);
1051     xfs_iunlock(ip, XFS_ILOCK_SHARED);
1052 
1053     return 0;
1054 }
1055 
1056 STATIC uint16_t
1057 xfs_flags2diflags(
1058     struct xfs_inode    *ip,
1059     unsigned int        xflags)
1060 {
1061     /* can't set PREALLOC this way, just preserve it */
1062     uint16_t        di_flags =
1063         (ip->i_diflags & XFS_DIFLAG_PREALLOC);
1064 
1065     if (xflags & FS_XFLAG_IMMUTABLE)
1066         di_flags |= XFS_DIFLAG_IMMUTABLE;
1067     if (xflags & FS_XFLAG_APPEND)
1068         di_flags |= XFS_DIFLAG_APPEND;
1069     if (xflags & FS_XFLAG_SYNC)
1070         di_flags |= XFS_DIFLAG_SYNC;
1071     if (xflags & FS_XFLAG_NOATIME)
1072         di_flags |= XFS_DIFLAG_NOATIME;
1073     if (xflags & FS_XFLAG_NODUMP)
1074         di_flags |= XFS_DIFLAG_NODUMP;
1075     if (xflags & FS_XFLAG_NODEFRAG)
1076         di_flags |= XFS_DIFLAG_NODEFRAG;
1077     if (xflags & FS_XFLAG_FILESTREAM)
1078         di_flags |= XFS_DIFLAG_FILESTREAM;
1079     if (S_ISDIR(VFS_I(ip)->i_mode)) {
1080         if (xflags & FS_XFLAG_RTINHERIT)
1081             di_flags |= XFS_DIFLAG_RTINHERIT;
1082         if (xflags & FS_XFLAG_NOSYMLINKS)
1083             di_flags |= XFS_DIFLAG_NOSYMLINKS;
1084         if (xflags & FS_XFLAG_EXTSZINHERIT)
1085             di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1086         if (xflags & FS_XFLAG_PROJINHERIT)
1087             di_flags |= XFS_DIFLAG_PROJINHERIT;
1088     } else if (S_ISREG(VFS_I(ip)->i_mode)) {
1089         if (xflags & FS_XFLAG_REALTIME)
1090             di_flags |= XFS_DIFLAG_REALTIME;
1091         if (xflags & FS_XFLAG_EXTSIZE)
1092             di_flags |= XFS_DIFLAG_EXTSIZE;
1093     }
1094 
1095     return di_flags;
1096 }
1097 
1098 STATIC uint64_t
1099 xfs_flags2diflags2(
1100     struct xfs_inode    *ip,
1101     unsigned int        xflags)
1102 {
1103     uint64_t        di_flags2 =
1104         (ip->i_diflags2 & (XFS_DIFLAG2_REFLINK |
1105                    XFS_DIFLAG2_BIGTIME |
1106                    XFS_DIFLAG2_NREXT64));
1107 
1108     if (xflags & FS_XFLAG_DAX)
1109         di_flags2 |= XFS_DIFLAG2_DAX;
1110     if (xflags & FS_XFLAG_COWEXTSIZE)
1111         di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
1112 
1113     return di_flags2;
1114 }
1115 
1116 static int
1117 xfs_ioctl_setattr_xflags(
1118     struct xfs_trans    *tp,
1119     struct xfs_inode    *ip,
1120     struct fileattr     *fa)
1121 {
1122     struct xfs_mount    *mp = ip->i_mount;
1123     uint64_t        i_flags2;
1124 
1125     /* Can't change realtime flag if any extents are allocated. */
1126     if ((ip->i_df.if_nextents || ip->i_delayed_blks) &&
1127         XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
1128         return -EINVAL;
1129 
1130     /* If realtime flag is set then must have realtime device */
1131     if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
1132         if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
1133             (ip->i_extsize % mp->m_sb.sb_rextsize))
1134             return -EINVAL;
1135     }
1136 
1137     /* Clear reflink if we are actually able to set the rt flag. */
1138     if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
1139         ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1140 
1141     /* Don't allow us to set DAX mode for a reflinked file for now. */
1142     if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
1143         return -EINVAL;
1144 
1145     /* diflags2 only valid for v3 inodes. */
1146     i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
1147     if (i_flags2 && !xfs_has_v3inodes(mp))
1148         return -EINVAL;
1149 
1150     ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
1151     ip->i_diflags2 = i_flags2;
1152 
1153     xfs_diflags_to_iflags(ip, false);
1154     xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1155     xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1156     XFS_STATS_INC(mp, xs_ig_attrchg);
1157     return 0;
1158 }
1159 
1160 static void
1161 xfs_ioctl_setattr_prepare_dax(
1162     struct xfs_inode    *ip,
1163     struct fileattr     *fa)
1164 {
1165     struct xfs_mount    *mp = ip->i_mount;
1166     struct inode            *inode = VFS_I(ip);
1167 
1168     if (S_ISDIR(inode->i_mode))
1169         return;
1170 
1171     if (xfs_has_dax_always(mp) || xfs_has_dax_never(mp))
1172         return;
1173 
1174     if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
1175         !(ip->i_diflags2 & XFS_DIFLAG2_DAX)) ||
1176         (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
1177          (ip->i_diflags2 & XFS_DIFLAG2_DAX)))
1178         d_mark_dontcache(inode);
1179 }
1180 
1181 /*
1182  * Set up the transaction structure for the setattr operation, checking that we
1183  * have permission to do so. On success, return a clean transaction and the
1184  * inode locked exclusively ready for further operation specific checks. On
1185  * failure, return an error without modifying or locking the inode.
1186  */
1187 static struct xfs_trans *
1188 xfs_ioctl_setattr_get_trans(
1189     struct xfs_inode    *ip,
1190     struct xfs_dquot    *pdqp)
1191 {
1192     struct xfs_mount    *mp = ip->i_mount;
1193     struct xfs_trans    *tp;
1194     int         error = -EROFS;
1195 
1196     if (xfs_is_readonly(mp))
1197         goto out_error;
1198     error = -EIO;
1199     if (xfs_is_shutdown(mp))
1200         goto out_error;
1201 
1202     error = xfs_trans_alloc_ichange(ip, NULL, NULL, pdqp,
1203             has_capability_noaudit(current, CAP_FOWNER), &tp);
1204     if (error)
1205         goto out_error;
1206 
1207     if (xfs_has_wsync(mp))
1208         xfs_trans_set_sync(tp);
1209 
1210     return tp;
1211 
1212 out_error:
1213     return ERR_PTR(error);
1214 }
1215 
1216 /*
1217  * Validate a proposed extent size hint.  For regular files, the hint can only
1218  * be changed if no extents are allocated.
1219  */
1220 static int
1221 xfs_ioctl_setattr_check_extsize(
1222     struct xfs_inode    *ip,
1223     struct fileattr     *fa)
1224 {
1225     struct xfs_mount    *mp = ip->i_mount;
1226     xfs_failaddr_t      failaddr;
1227     uint16_t        new_diflags;
1228 
1229     if (!fa->fsx_valid)
1230         return 0;
1231 
1232     if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
1233         XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
1234         return -EINVAL;
1235 
1236     if (fa->fsx_extsize & mp->m_blockmask)
1237         return -EINVAL;
1238 
1239     new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
1240 
1241     /*
1242      * Inode verifiers do not check that the extent size hint is an integer
1243      * multiple of the rt extent size on a directory with both rtinherit
1244      * and extszinherit flags set.  Don't let sysadmins misconfigure
1245      * directories.
1246      */
1247     if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
1248         (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
1249         unsigned int    rtextsize_bytes;
1250 
1251         rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
1252         if (fa->fsx_extsize % rtextsize_bytes)
1253             return -EINVAL;
1254     }
1255 
1256     failaddr = xfs_inode_validate_extsize(ip->i_mount,
1257             XFS_B_TO_FSB(mp, fa->fsx_extsize),
1258             VFS_I(ip)->i_mode, new_diflags);
1259     return failaddr != NULL ? -EINVAL : 0;
1260 }
1261 
1262 static int
1263 xfs_ioctl_setattr_check_cowextsize(
1264     struct xfs_inode    *ip,
1265     struct fileattr     *fa)
1266 {
1267     struct xfs_mount    *mp = ip->i_mount;
1268     xfs_failaddr_t      failaddr;
1269     uint64_t        new_diflags2;
1270     uint16_t        new_diflags;
1271 
1272     if (!fa->fsx_valid)
1273         return 0;
1274 
1275     if (fa->fsx_cowextsize & mp->m_blockmask)
1276         return -EINVAL;
1277 
1278     new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
1279     new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
1280 
1281     failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
1282             XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
1283             VFS_I(ip)->i_mode, new_diflags, new_diflags2);
1284     return failaddr != NULL ? -EINVAL : 0;
1285 }
1286 
1287 static int
1288 xfs_ioctl_setattr_check_projid(
1289     struct xfs_inode    *ip,
1290     struct fileattr     *fa)
1291 {
1292     if (!fa->fsx_valid)
1293         return 0;
1294 
1295     /* Disallow 32bit project ids if 32bit IDs are not enabled. */
1296     if (fa->fsx_projid > (uint16_t)-1 &&
1297         !xfs_has_projid32(ip->i_mount))
1298         return -EINVAL;
1299     return 0;
1300 }
1301 
1302 int
1303 xfs_fileattr_set(
1304     struct user_namespace   *mnt_userns,
1305     struct dentry       *dentry,
1306     struct fileattr     *fa)
1307 {
1308     struct xfs_inode    *ip = XFS_I(d_inode(dentry));
1309     struct xfs_mount    *mp = ip->i_mount;
1310     struct xfs_trans    *tp;
1311     struct xfs_dquot    *pdqp = NULL;
1312     struct xfs_dquot    *olddquot = NULL;
1313     int         error;
1314 
1315     trace_xfs_ioctl_setattr(ip);
1316 
1317     if (d_is_special(dentry))
1318         return -ENOTTY;
1319 
1320     if (!fa->fsx_valid) {
1321         if (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
1322                   FS_NOATIME_FL | FS_NODUMP_FL |
1323                   FS_SYNC_FL | FS_DAX_FL | FS_PROJINHERIT_FL))
1324             return -EOPNOTSUPP;
1325     }
1326 
1327     error = xfs_ioctl_setattr_check_projid(ip, fa);
1328     if (error)
1329         return error;
1330 
1331     /*
1332      * If disk quotas is on, we make sure that the dquots do exist on disk,
1333      * before we start any other transactions. Trying to do this later
1334      * is messy. We don't care to take a readlock to look at the ids
1335      * in inode here, because we can't hold it across the trans_reserve.
1336      * If the IDs do change before we take the ilock, we're covered
1337      * because the i_*dquot fields will get updated anyway.
1338      */
1339     if (fa->fsx_valid && XFS_IS_QUOTA_ON(mp)) {
1340         error = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
1341                 VFS_I(ip)->i_gid, fa->fsx_projid,
1342                 XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
1343         if (error)
1344             return error;
1345     }
1346 
1347     xfs_ioctl_setattr_prepare_dax(ip, fa);
1348 
1349     tp = xfs_ioctl_setattr_get_trans(ip, pdqp);
1350     if (IS_ERR(tp)) {
1351         error = PTR_ERR(tp);
1352         goto error_free_dquots;
1353     }
1354 
1355     error = xfs_ioctl_setattr_check_extsize(ip, fa);
1356     if (error)
1357         goto error_trans_cancel;
1358 
1359     error = xfs_ioctl_setattr_check_cowextsize(ip, fa);
1360     if (error)
1361         goto error_trans_cancel;
1362 
1363     error = xfs_ioctl_setattr_xflags(tp, ip, fa);
1364     if (error)
1365         goto error_trans_cancel;
1366 
1367     if (!fa->fsx_valid)
1368         goto skip_xattr;
1369     /*
1370      * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
1371      * overrides the following restrictions:
1372      *
1373      * The set-user-ID and set-group-ID bits of a file will be cleared upon
1374      * successful return from chown()
1375      */
1376 
1377     if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
1378         !capable_wrt_inode_uidgid(mnt_userns, VFS_I(ip), CAP_FSETID))
1379         VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
1380 
1381     /* Change the ownerships and register project quota modifications */
1382     if (ip->i_projid != fa->fsx_projid) {
1383         if (XFS_IS_PQUOTA_ON(mp)) {
1384             olddquot = xfs_qm_vop_chown(tp, ip,
1385                         &ip->i_pdquot, pdqp);
1386         }
1387         ip->i_projid = fa->fsx_projid;
1388     }
1389 
1390     /*
1391      * Only set the extent size hint if we've already determined that the
1392      * extent size hint should be set on the inode. If no extent size flags
1393      * are set on the inode then unconditionally clear the extent size hint.
1394      */
1395     if (ip->i_diflags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
1396         ip->i_extsize = XFS_B_TO_FSB(mp, fa->fsx_extsize);
1397     else
1398         ip->i_extsize = 0;
1399 
1400     if (xfs_has_v3inodes(mp)) {
1401         if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
1402             ip->i_cowextsize = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
1403         else
1404             ip->i_cowextsize = 0;
1405     }
1406 
1407 skip_xattr:
1408     error = xfs_trans_commit(tp);
1409 
1410     /*
1411      * Release any dquot(s) the inode had kept before chown.
1412      */
1413     xfs_qm_dqrele(olddquot);
1414     xfs_qm_dqrele(pdqp);
1415 
1416     return error;
1417 
1418 error_trans_cancel:
1419     xfs_trans_cancel(tp);
1420 error_free_dquots:
1421     xfs_qm_dqrele(pdqp);
1422     return error;
1423 }
1424 
1425 static bool
1426 xfs_getbmap_format(
1427     struct kgetbmap     *p,
1428     struct getbmapx __user  *u,
1429     size_t          recsize)
1430 {
1431     if (put_user(p->bmv_offset, &u->bmv_offset) ||
1432         put_user(p->bmv_block, &u->bmv_block) ||
1433         put_user(p->bmv_length, &u->bmv_length) ||
1434         put_user(0, &u->bmv_count) ||
1435         put_user(0, &u->bmv_entries))
1436         return false;
1437     if (recsize < sizeof(struct getbmapx))
1438         return true;
1439     if (put_user(0, &u->bmv_iflags) ||
1440         put_user(p->bmv_oflags, &u->bmv_oflags) ||
1441         put_user(0, &u->bmv_unused1) ||
1442         put_user(0, &u->bmv_unused2))
1443         return false;
1444     return true;
1445 }
1446 
1447 STATIC int
1448 xfs_ioc_getbmap(
1449     struct file     *file,
1450     unsigned int        cmd,
1451     void            __user *arg)
1452 {
1453     struct getbmapx     bmx = { 0 };
1454     struct kgetbmap     *buf;
1455     size_t          recsize;
1456     int         error, i;
1457 
1458     switch (cmd) {
1459     case XFS_IOC_GETBMAPA:
1460         bmx.bmv_iflags = BMV_IF_ATTRFORK;
1461         fallthrough;
1462     case XFS_IOC_GETBMAP:
1463         /* struct getbmap is a strict subset of struct getbmapx. */
1464         recsize = sizeof(struct getbmap);
1465         break;
1466     case XFS_IOC_GETBMAPX:
1467         recsize = sizeof(struct getbmapx);
1468         break;
1469     default:
1470         return -EINVAL;
1471     }
1472 
1473     if (copy_from_user(&bmx, arg, recsize))
1474         return -EFAULT;
1475 
1476     if (bmx.bmv_count < 2)
1477         return -EINVAL;
1478     if (bmx.bmv_count >= INT_MAX / recsize)
1479         return -ENOMEM;
1480 
1481     buf = kvcalloc(bmx.bmv_count, sizeof(*buf), GFP_KERNEL);
1482     if (!buf)
1483         return -ENOMEM;
1484 
1485     error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
1486     if (error)
1487         goto out_free_buf;
1488 
1489     error = -EFAULT;
1490     if (copy_to_user(arg, &bmx, recsize))
1491         goto out_free_buf;
1492     arg += recsize;
1493 
1494     for (i = 0; i < bmx.bmv_entries; i++) {
1495         if (!xfs_getbmap_format(buf + i, arg, recsize))
1496             goto out_free_buf;
1497         arg += recsize;
1498     }
1499 
1500     error = 0;
1501 out_free_buf:
1502     kmem_free(buf);
1503     return error;
1504 }
1505 
1506 STATIC int
1507 xfs_ioc_getfsmap(
1508     struct xfs_inode    *ip,
1509     struct fsmap_head   __user *arg)
1510 {
1511     struct xfs_fsmap_head   xhead = {0};
1512     struct fsmap_head   head;
1513     struct fsmap        *recs;
1514     unsigned int        count;
1515     __u32           last_flags = 0;
1516     bool            done = false;
1517     int         error;
1518 
1519     if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
1520         return -EFAULT;
1521     if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
1522         memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
1523                sizeof(head.fmh_keys[0].fmr_reserved)) ||
1524         memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
1525                sizeof(head.fmh_keys[1].fmr_reserved)))
1526         return -EINVAL;
1527 
1528     /*
1529      * Use an internal memory buffer so that we don't have to copy fsmap
1530      * data to userspace while holding locks.  Start by trying to allocate
1531      * up to 128k for the buffer, but fall back to a single page if needed.
1532      */
1533     count = min_t(unsigned int, head.fmh_count,
1534             131072 / sizeof(struct fsmap));
1535     recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
1536     if (!recs) {
1537         count = min_t(unsigned int, head.fmh_count,
1538                 PAGE_SIZE / sizeof(struct fsmap));
1539         recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
1540         if (!recs)
1541             return -ENOMEM;
1542     }
1543 
1544     xhead.fmh_iflags = head.fmh_iflags;
1545     xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
1546     xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
1547 
1548     trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
1549     trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
1550 
1551     head.fmh_entries = 0;
1552     do {
1553         struct fsmap __user *user_recs;
1554         struct fsmap        *last_rec;
1555 
1556         user_recs = &arg->fmh_recs[head.fmh_entries];
1557         xhead.fmh_entries = 0;
1558         xhead.fmh_count = min_t(unsigned int, count,
1559                     head.fmh_count - head.fmh_entries);
1560 
1561         /* Run query, record how many entries we got. */
1562         error = xfs_getfsmap(ip->i_mount, &xhead, recs);
1563         switch (error) {
1564         case 0:
1565             /*
1566              * There are no more records in the result set.  Copy
1567              * whatever we got to userspace and break out.
1568              */
1569             done = true;
1570             break;
1571         case -ECANCELED:
1572             /*
1573              * The internal memory buffer is full.  Copy whatever
1574              * records we got to userspace and go again if we have
1575              * not yet filled the userspace buffer.
1576              */
1577             error = 0;
1578             break;
1579         default:
1580             goto out_free;
1581         }
1582         head.fmh_entries += xhead.fmh_entries;
1583         head.fmh_oflags = xhead.fmh_oflags;
1584 
1585         /*
1586          * If the caller wanted a record count or there aren't any
1587          * new records to return, we're done.
1588          */
1589         if (head.fmh_count == 0 || xhead.fmh_entries == 0)
1590             break;
1591 
1592         /* Copy all the records we got out to userspace. */
1593         if (copy_to_user(user_recs, recs,
1594                  xhead.fmh_entries * sizeof(struct fsmap))) {
1595             error = -EFAULT;
1596             goto out_free;
1597         }
1598 
1599         /* Remember the last record flags we copied to userspace. */
1600         last_rec = &recs[xhead.fmh_entries - 1];
1601         last_flags = last_rec->fmr_flags;
1602 
1603         /* Set up the low key for the next iteration. */
1604         xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
1605         trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
1606     } while (!done && head.fmh_entries < head.fmh_count);
1607 
1608     /*
1609      * If there are no more records in the query result set and we're not
1610      * in counting mode, mark the last record returned with the LAST flag.
1611      */
1612     if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
1613         struct fsmap __user *user_rec;
1614 
1615         last_flags |= FMR_OF_LAST;
1616         user_rec = &arg->fmh_recs[head.fmh_entries - 1];
1617 
1618         if (copy_to_user(&user_rec->fmr_flags, &last_flags,
1619                     sizeof(last_flags))) {
1620             error = -EFAULT;
1621             goto out_free;
1622         }
1623     }
1624 
1625     /* copy back header */
1626     if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
1627         error = -EFAULT;
1628         goto out_free;
1629     }
1630 
1631 out_free:
1632     kmem_free(recs);
1633     return error;
1634 }
1635 
1636 STATIC int
1637 xfs_ioc_scrub_metadata(
1638     struct file         *file,
1639     void                __user *arg)
1640 {
1641     struct xfs_scrub_metadata   scrub;
1642     int             error;
1643 
1644     if (!capable(CAP_SYS_ADMIN))
1645         return -EPERM;
1646 
1647     if (copy_from_user(&scrub, arg, sizeof(scrub)))
1648         return -EFAULT;
1649 
1650     error = xfs_scrub_metadata(file, &scrub);
1651     if (error)
1652         return error;
1653 
1654     if (copy_to_user(arg, &scrub, sizeof(scrub)))
1655         return -EFAULT;
1656 
1657     return 0;
1658 }
1659 
1660 int
1661 xfs_ioc_swapext(
1662     xfs_swapext_t   *sxp)
1663 {
1664     xfs_inode_t     *ip, *tip;
1665     struct fd   f, tmp;
1666     int     error = 0;
1667 
1668     /* Pull information for the target fd */
1669     f = fdget((int)sxp->sx_fdtarget);
1670     if (!f.file) {
1671         error = -EINVAL;
1672         goto out;
1673     }
1674 
1675     if (!(f.file->f_mode & FMODE_WRITE) ||
1676         !(f.file->f_mode & FMODE_READ) ||
1677         (f.file->f_flags & O_APPEND)) {
1678         error = -EBADF;
1679         goto out_put_file;
1680     }
1681 
1682     tmp = fdget((int)sxp->sx_fdtmp);
1683     if (!tmp.file) {
1684         error = -EINVAL;
1685         goto out_put_file;
1686     }
1687 
1688     if (!(tmp.file->f_mode & FMODE_WRITE) ||
1689         !(tmp.file->f_mode & FMODE_READ) ||
1690         (tmp.file->f_flags & O_APPEND)) {
1691         error = -EBADF;
1692         goto out_put_tmp_file;
1693     }
1694 
1695     if (IS_SWAPFILE(file_inode(f.file)) ||
1696         IS_SWAPFILE(file_inode(tmp.file))) {
1697         error = -EINVAL;
1698         goto out_put_tmp_file;
1699     }
1700 
1701     /*
1702      * We need to ensure that the fds passed in point to XFS inodes
1703      * before we cast and access them as XFS structures as we have no
1704      * control over what the user passes us here.
1705      */
1706     if (f.file->f_op != &xfs_file_operations ||
1707         tmp.file->f_op != &xfs_file_operations) {
1708         error = -EINVAL;
1709         goto out_put_tmp_file;
1710     }
1711 
1712     ip = XFS_I(file_inode(f.file));
1713     tip = XFS_I(file_inode(tmp.file));
1714 
1715     if (ip->i_mount != tip->i_mount) {
1716         error = -EINVAL;
1717         goto out_put_tmp_file;
1718     }
1719 
1720     if (ip->i_ino == tip->i_ino) {
1721         error = -EINVAL;
1722         goto out_put_tmp_file;
1723     }
1724 
1725     if (xfs_is_shutdown(ip->i_mount)) {
1726         error = -EIO;
1727         goto out_put_tmp_file;
1728     }
1729 
1730     error = xfs_swap_extents(ip, tip, sxp);
1731 
1732  out_put_tmp_file:
1733     fdput(tmp);
1734  out_put_file:
1735     fdput(f);
1736  out:
1737     return error;
1738 }
1739 
1740 static int
1741 xfs_ioc_getlabel(
1742     struct xfs_mount    *mp,
1743     char            __user *user_label)
1744 {
1745     struct xfs_sb       *sbp = &mp->m_sb;
1746     char            label[XFSLABEL_MAX + 1];
1747 
1748     /* Paranoia */
1749     BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);
1750 
1751     /* 1 larger than sb_fname, so this ensures a trailing NUL char */
1752     memset(label, 0, sizeof(label));
1753     spin_lock(&mp->m_sb_lock);
1754     strncpy(label, sbp->sb_fname, XFSLABEL_MAX);
1755     spin_unlock(&mp->m_sb_lock);
1756 
1757     if (copy_to_user(user_label, label, sizeof(label)))
1758         return -EFAULT;
1759     return 0;
1760 }
1761 
1762 static int
1763 xfs_ioc_setlabel(
1764     struct file     *filp,
1765     struct xfs_mount    *mp,
1766     char            __user *newlabel)
1767 {
1768     struct xfs_sb       *sbp = &mp->m_sb;
1769     char            label[XFSLABEL_MAX + 1];
1770     size_t          len;
1771     int         error;
1772 
1773     if (!capable(CAP_SYS_ADMIN))
1774         return -EPERM;
1775     /*
1776      * The generic ioctl allows up to FSLABEL_MAX chars, but XFS is much
1777      * smaller, at 12 bytes.  We copy one more to be sure we find the
1778      * (required) NULL character to test the incoming label length.
1779      * NB: The on disk label doesn't need to be null terminated.
1780      */
1781     if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
1782         return -EFAULT;
1783     len = strnlen(label, XFSLABEL_MAX + 1);
1784     if (len > sizeof(sbp->sb_fname))
1785         return -EINVAL;
1786 
1787     error = mnt_want_write_file(filp);
1788     if (error)
1789         return error;
1790 
1791     spin_lock(&mp->m_sb_lock);
1792     memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
1793     memcpy(sbp->sb_fname, label, len);
1794     spin_unlock(&mp->m_sb_lock);
1795 
1796     /*
1797      * Now we do several things to satisfy userspace.
1798      * In addition to normal logging of the primary superblock, we also
1799      * immediately write these changes to sector zero for the primary, then
1800      * update all backup supers (as xfs_db does for a label change), then
1801      * invalidate the block device page cache.  This is so that any prior
1802      * buffered reads from userspace (i.e. from blkid) are invalidated,
1803      * and userspace will see the newly-written label.
1804      */
1805     error = xfs_sync_sb_buf(mp);
1806     if (error)
1807         goto out;
1808     /*
1809      * growfs also updates backup supers so lock against that.
1810      */
1811     mutex_lock(&mp->m_growlock);
1812     error = xfs_update_secondary_sbs(mp);
1813     mutex_unlock(&mp->m_growlock);
1814 
1815     invalidate_bdev(mp->m_ddev_targp->bt_bdev);
1816 
1817 out:
1818     mnt_drop_write_file(filp);
1819     return error;
1820 }
1821 
1822 static inline int
1823 xfs_fs_eofblocks_from_user(
1824     struct xfs_fs_eofblocks     *src,
1825     struct xfs_icwalk       *dst)
1826 {
1827     if (src->eof_version != XFS_EOFBLOCKS_VERSION)
1828         return -EINVAL;
1829 
1830     if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
1831         return -EINVAL;
1832 
1833     if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
1834         memchr_inv(src->pad64, 0, sizeof(src->pad64)))
1835         return -EINVAL;
1836 
1837     dst->icw_flags = 0;
1838     if (src->eof_flags & XFS_EOF_FLAGS_SYNC)
1839         dst->icw_flags |= XFS_ICWALK_FLAG_SYNC;
1840     if (src->eof_flags & XFS_EOF_FLAGS_UID)
1841         dst->icw_flags |= XFS_ICWALK_FLAG_UID;
1842     if (src->eof_flags & XFS_EOF_FLAGS_GID)
1843         dst->icw_flags |= XFS_ICWALK_FLAG_GID;
1844     if (src->eof_flags & XFS_EOF_FLAGS_PRID)
1845         dst->icw_flags |= XFS_ICWALK_FLAG_PRID;
1846     if (src->eof_flags & XFS_EOF_FLAGS_MINFILESIZE)
1847         dst->icw_flags |= XFS_ICWALK_FLAG_MINFILESIZE;
1848 
1849     dst->icw_prid = src->eof_prid;
1850     dst->icw_min_file_size = src->eof_min_file_size;
1851 
1852     dst->icw_uid = INVALID_UID;
1853     if (src->eof_flags & XFS_EOF_FLAGS_UID) {
1854         dst->icw_uid = make_kuid(current_user_ns(), src->eof_uid);
1855         if (!uid_valid(dst->icw_uid))
1856             return -EINVAL;
1857     }
1858 
1859     dst->icw_gid = INVALID_GID;
1860     if (src->eof_flags & XFS_EOF_FLAGS_GID) {
1861         dst->icw_gid = make_kgid(current_user_ns(), src->eof_gid);
1862         if (!gid_valid(dst->icw_gid))
1863             return -EINVAL;
1864     }
1865     return 0;
1866 }
1867 
1868 /*
1869  * These long-unused ioctls were removed from the official ioctl API in 5.17,
1870  * but retain these definitions so that we can log warnings about them.
1871  */
1872 #define XFS_IOC_ALLOCSP     _IOW ('X', 10, struct xfs_flock64)
1873 #define XFS_IOC_FREESP      _IOW ('X', 11, struct xfs_flock64)
1874 #define XFS_IOC_ALLOCSP64   _IOW ('X', 36, struct xfs_flock64)
1875 #define XFS_IOC_FREESP64    _IOW ('X', 37, struct xfs_flock64)
1876 
1877 /*
1878  * Note: some of the ioctl's return positive numbers as a
1879  * byte count indicating success, such as readlink_by_handle.
1880  * So we don't "sign flip" like most other routines.  This means
1881  * true errors need to be returned as a negative value.
1882  */
1883 long
1884 xfs_file_ioctl(
1885     struct file     *filp,
1886     unsigned int        cmd,
1887     unsigned long       p)
1888 {
1889     struct inode        *inode = file_inode(filp);
1890     struct xfs_inode    *ip = XFS_I(inode);
1891     struct xfs_mount    *mp = ip->i_mount;
1892     void            __user *arg = (void __user *)p;
1893     int         error;
1894 
1895     trace_xfs_file_ioctl(ip);
1896 
1897     switch (cmd) {
1898     case FITRIM:
1899         return xfs_ioc_trim(mp, arg);
1900     case FS_IOC_GETFSLABEL:
1901         return xfs_ioc_getlabel(mp, arg);
1902     case FS_IOC_SETFSLABEL:
1903         return xfs_ioc_setlabel(filp, mp, arg);
1904     case XFS_IOC_ALLOCSP:
1905     case XFS_IOC_FREESP:
1906     case XFS_IOC_ALLOCSP64:
1907     case XFS_IOC_FREESP64:
1908         xfs_warn_once(mp,
1909     "%s should use fallocate; XFS_IOC_{ALLOC,FREE}SP ioctl unsupported",
1910                 current->comm);
1911         return -ENOTTY;
1912     case XFS_IOC_DIOINFO: {
1913         struct xfs_buftarg  *target = xfs_inode_buftarg(ip);
1914         struct dioattr      da;
1915 
1916         da.d_mem =  da.d_miniosz = target->bt_logical_sectorsize;
1917         da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
1918 
1919         if (copy_to_user(arg, &da, sizeof(da)))
1920             return -EFAULT;
1921         return 0;
1922     }
1923 
1924     case XFS_IOC_FSBULKSTAT_SINGLE:
1925     case XFS_IOC_FSBULKSTAT:
1926     case XFS_IOC_FSINUMBERS:
1927         return xfs_ioc_fsbulkstat(filp, cmd, arg);
1928 
1929     case XFS_IOC_BULKSTAT:
1930         return xfs_ioc_bulkstat(filp, cmd, arg);
1931     case XFS_IOC_INUMBERS:
1932         return xfs_ioc_inumbers(mp, cmd, arg);
1933 
1934     case XFS_IOC_FSGEOMETRY_V1:
1935         return xfs_ioc_fsgeometry(mp, arg, 3);
1936     case XFS_IOC_FSGEOMETRY_V4:
1937         return xfs_ioc_fsgeometry(mp, arg, 4);
1938     case XFS_IOC_FSGEOMETRY:
1939         return xfs_ioc_fsgeometry(mp, arg, 5);
1940 
1941     case XFS_IOC_AG_GEOMETRY:
1942         return xfs_ioc_ag_geometry(mp, arg);
1943 
1944     case XFS_IOC_GETVERSION:
1945         return put_user(inode->i_generation, (int __user *)arg);
1946 
1947     case XFS_IOC_FSGETXATTRA:
1948         return xfs_ioc_fsgetxattra(ip, arg);
1949 
1950     case XFS_IOC_GETBMAP:
1951     case XFS_IOC_GETBMAPA:
1952     case XFS_IOC_GETBMAPX:
1953         return xfs_ioc_getbmap(filp, cmd, arg);
1954 
1955     case FS_IOC_GETFSMAP:
1956         return xfs_ioc_getfsmap(ip, arg);
1957 
1958     case XFS_IOC_SCRUB_METADATA:
1959         return xfs_ioc_scrub_metadata(filp, arg);
1960 
1961     case XFS_IOC_FD_TO_HANDLE:
1962     case XFS_IOC_PATH_TO_HANDLE:
1963     case XFS_IOC_PATH_TO_FSHANDLE: {
1964         xfs_fsop_handlereq_t    hreq;
1965 
1966         if (copy_from_user(&hreq, arg, sizeof(hreq)))
1967             return -EFAULT;
1968         return xfs_find_handle(cmd, &hreq);
1969     }
1970     case XFS_IOC_OPEN_BY_HANDLE: {
1971         xfs_fsop_handlereq_t    hreq;
1972 
1973         if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
1974             return -EFAULT;
1975         return xfs_open_by_handle(filp, &hreq);
1976     }
1977 
1978     case XFS_IOC_READLINK_BY_HANDLE: {
1979         xfs_fsop_handlereq_t    hreq;
1980 
1981         if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
1982             return -EFAULT;
1983         return xfs_readlink_by_handle(filp, &hreq);
1984     }
1985     case XFS_IOC_ATTRLIST_BY_HANDLE:
1986         return xfs_attrlist_by_handle(filp, arg);
1987 
1988     case XFS_IOC_ATTRMULTI_BY_HANDLE:
1989         return xfs_attrmulti_by_handle(filp, arg);
1990 
1991     case XFS_IOC_SWAPEXT: {
1992         struct xfs_swapext  sxp;
1993 
1994         if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
1995             return -EFAULT;
1996         error = mnt_want_write_file(filp);
1997         if (error)
1998             return error;
1999         error = xfs_ioc_swapext(&sxp);
2000         mnt_drop_write_file(filp);
2001         return error;
2002     }
2003 
2004     case XFS_IOC_FSCOUNTS: {
2005         xfs_fsop_counts_t out;
2006 
2007         xfs_fs_counts(mp, &out);
2008 
2009         if (copy_to_user(arg, &out, sizeof(out)))
2010             return -EFAULT;
2011         return 0;
2012     }
2013 
2014     case XFS_IOC_SET_RESBLKS: {
2015         xfs_fsop_resblks_t inout;
2016         uint64_t       in;
2017 
2018         if (!capable(CAP_SYS_ADMIN))
2019             return -EPERM;
2020 
2021         if (xfs_is_readonly(mp))
2022             return -EROFS;
2023 
2024         if (copy_from_user(&inout, arg, sizeof(inout)))
2025             return -EFAULT;
2026 
2027         error = mnt_want_write_file(filp);
2028         if (error)
2029             return error;
2030 
2031         /* input parameter is passed in resblks field of structure */
2032         in = inout.resblks;
2033         error = xfs_reserve_blocks(mp, &in, &inout);
2034         mnt_drop_write_file(filp);
2035         if (error)
2036             return error;
2037 
2038         if (copy_to_user(arg, &inout, sizeof(inout)))
2039             return -EFAULT;
2040         return 0;
2041     }
2042 
2043     case XFS_IOC_GET_RESBLKS: {
2044         xfs_fsop_resblks_t out;
2045 
2046         if (!capable(CAP_SYS_ADMIN))
2047             return -EPERM;
2048 
2049         error = xfs_reserve_blocks(mp, NULL, &out);
2050         if (error)
2051             return error;
2052 
2053         if (copy_to_user(arg, &out, sizeof(out)))
2054             return -EFAULT;
2055 
2056         return 0;
2057     }
2058 
2059     case XFS_IOC_FSGROWFSDATA: {
2060         struct xfs_growfs_data in;
2061 
2062         if (copy_from_user(&in, arg, sizeof(in)))
2063             return -EFAULT;
2064 
2065         error = mnt_want_write_file(filp);
2066         if (error)
2067             return error;
2068         error = xfs_growfs_data(mp, &in);
2069         mnt_drop_write_file(filp);
2070         return error;
2071     }
2072 
2073     case XFS_IOC_FSGROWFSLOG: {
2074         struct xfs_growfs_log in;
2075 
2076         if (copy_from_user(&in, arg, sizeof(in)))
2077             return -EFAULT;
2078 
2079         error = mnt_want_write_file(filp);
2080         if (error)
2081             return error;
2082         error = xfs_growfs_log(mp, &in);
2083         mnt_drop_write_file(filp);
2084         return error;
2085     }
2086 
2087     case XFS_IOC_FSGROWFSRT: {
2088         xfs_growfs_rt_t in;
2089 
2090         if (copy_from_user(&in, arg, sizeof(in)))
2091             return -EFAULT;
2092 
2093         error = mnt_want_write_file(filp);
2094         if (error)
2095             return error;
2096         error = xfs_growfs_rt(mp, &in);
2097         mnt_drop_write_file(filp);
2098         return error;
2099     }
2100 
2101     case XFS_IOC_GOINGDOWN: {
2102         uint32_t in;
2103 
2104         if (!capable(CAP_SYS_ADMIN))
2105             return -EPERM;
2106 
2107         if (get_user(in, (uint32_t __user *)arg))
2108             return -EFAULT;
2109 
2110         return xfs_fs_goingdown(mp, in);
2111     }
2112 
2113     case XFS_IOC_ERROR_INJECTION: {
2114         xfs_error_injection_t in;
2115 
2116         if (!capable(CAP_SYS_ADMIN))
2117             return -EPERM;
2118 
2119         if (copy_from_user(&in, arg, sizeof(in)))
2120             return -EFAULT;
2121 
2122         return xfs_errortag_add(mp, in.errtag);
2123     }
2124 
2125     case XFS_IOC_ERROR_CLEARALL:
2126         if (!capable(CAP_SYS_ADMIN))
2127             return -EPERM;
2128 
2129         return xfs_errortag_clearall(mp);
2130 
2131     case XFS_IOC_FREE_EOFBLOCKS: {
2132         struct xfs_fs_eofblocks eofb;
2133         struct xfs_icwalk   icw;
2134 
2135         if (!capable(CAP_SYS_ADMIN))
2136             return -EPERM;
2137 
2138         if (xfs_is_readonly(mp))
2139             return -EROFS;
2140 
2141         if (copy_from_user(&eofb, arg, sizeof(eofb)))
2142             return -EFAULT;
2143 
2144         error = xfs_fs_eofblocks_from_user(&eofb, &icw);
2145         if (error)
2146             return error;
2147 
2148         trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);
2149 
2150         sb_start_write(mp->m_super);
2151         error = xfs_blockgc_free_space(mp, &icw);
2152         sb_end_write(mp->m_super);
2153         return error;
2154     }
2155 
2156     default:
2157         return -ENOTTY;
2158     }
2159 }