/* Source: Linux kernel io_uring/openclose.c (retrieved via an LXR code browser). */

0001 // SPDX-License-Identifier: GPL-2.0
0002 #include <linux/kernel.h>
0003 #include <linux/errno.h>
0004 #include <linux/fs.h>
0005 #include <linux/file.h>
0006 #include <linux/fdtable.h>
0007 #include <linux/fsnotify.h>
0008 #include <linux/namei.h>
0009 #include <linux/io_uring.h>
0010 
0011 #include <uapi/linux/io_uring.h>
0012 
0013 #include "../fs/internal.h"
0014 
0015 #include "io_uring.h"
0016 #include "rsrc.h"
0017 #include "openclose.h"
0018 
/* Per-request state for IORING_OP_OPENAT / IORING_OP_OPENAT2 */
struct io_open {
	struct file			*file;
	/* directory fd the path is resolved relative to */
	int				dfd;
	/* fixed-file table slot request from sqe->file_index; 0 = normal fd */
	u32				file_slot;
	/* kernel copy of the user path, obtained via getname() at prep time */
	struct filename			*filename;
	/* openat2(2)-style open parameters (flags, mode, resolve) */
	struct open_how			how;
	/* RLIMIT_NOFILE limit snapshotted at prep time */
	unsigned long			nofile;
};
0027 
/* Per-request state for IORING_OP_CLOSE */
struct io_close {
	struct file			*file;
	/* regular fd to close; must be 0 when file_slot is used */
	int				fd;
	/* fixed-file slot + 1 to remove; 0 means close a regular fd */
	u32				file_slot;
};
0033 
0034 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
0035 {
0036     struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
0037     const char __user *fname;
0038     int ret;
0039 
0040     if (unlikely(sqe->buf_index))
0041         return -EINVAL;
0042     if (unlikely(req->flags & REQ_F_FIXED_FILE))
0043         return -EBADF;
0044 
0045     /* open.how should be already initialised */
0046     if (!(open->how.flags & O_PATH) && force_o_largefile())
0047         open->how.flags |= O_LARGEFILE;
0048 
0049     open->dfd = READ_ONCE(sqe->fd);
0050     fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
0051     open->filename = getname(fname);
0052     if (IS_ERR(open->filename)) {
0053         ret = PTR_ERR(open->filename);
0054         open->filename = NULL;
0055         return ret;
0056     }
0057 
0058     open->file_slot = READ_ONCE(sqe->file_index);
0059     if (open->file_slot && (open->how.flags & O_CLOEXEC))
0060         return -EINVAL;
0061 
0062     open->nofile = rlimit(RLIMIT_NOFILE);
0063     req->flags |= REQ_F_NEED_CLEANUP;
0064     return 0;
0065 }
0066 
0067 int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
0068 {
0069     struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
0070     u64 mode = READ_ONCE(sqe->len);
0071     u64 flags = READ_ONCE(sqe->open_flags);
0072 
0073     open->how = build_open_how(flags, mode);
0074     return __io_openat_prep(req, sqe);
0075 }
0076 
0077 int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
0078 {
0079     struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
0080     struct open_how __user *how;
0081     size_t len;
0082     int ret;
0083 
0084     how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
0085     len = READ_ONCE(sqe->len);
0086     if (len < OPEN_HOW_SIZE_VER0)
0087         return -EINVAL;
0088 
0089     ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
0090     if (ret)
0091         return ret;
0092 
0093     return __io_openat_prep(req, sqe);
0094 }
0095 
0096 int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
0097 {
0098     struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
0099     struct open_flags op;
0100     struct file *file;
0101     bool resolve_nonblock, nonblock_set;
0102     bool fixed = !!open->file_slot;
0103     int ret;
0104 
0105     ret = build_open_flags(&open->how, &op);
0106     if (ret)
0107         goto err;
0108     nonblock_set = op.open_flag & O_NONBLOCK;
0109     resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
0110     if (issue_flags & IO_URING_F_NONBLOCK) {
0111         /*
0112          * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
0113          * it'll always -EAGAIN
0114          */
0115         if (open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
0116             return -EAGAIN;
0117         op.lookup_flags |= LOOKUP_CACHED;
0118         op.open_flag |= O_NONBLOCK;
0119     }
0120 
0121     if (!fixed) {
0122         ret = __get_unused_fd_flags(open->how.flags, open->nofile);
0123         if (ret < 0)
0124             goto err;
0125     }
0126 
0127     file = do_filp_open(open->dfd, open->filename, &op);
0128     if (IS_ERR(file)) {
0129         /*
0130          * We could hang on to this 'fd' on retrying, but seems like
0131          * marginal gain for something that is now known to be a slower
0132          * path. So just put it, and we'll get a new one when we retry.
0133          */
0134         if (!fixed)
0135             put_unused_fd(ret);
0136 
0137         ret = PTR_ERR(file);
0138         /* only retry if RESOLVE_CACHED wasn't already set by application */
0139         if (ret == -EAGAIN &&
0140             (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
0141             return -EAGAIN;
0142         goto err;
0143     }
0144 
0145     if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
0146         file->f_flags &= ~O_NONBLOCK;
0147     fsnotify_open(file);
0148 
0149     if (!fixed)
0150         fd_install(ret, file);
0151     else
0152         ret = io_fixed_fd_install(req, issue_flags, file,
0153                         open->file_slot);
0154 err:
0155     putname(open->filename);
0156     req->flags &= ~REQ_F_NEED_CLEANUP;
0157     if (ret < 0)
0158         req_set_fail(req);
0159     io_req_set_res(req, ret, 0);
0160     return IOU_OK;
0161 }
0162 
/*
 * IORING_OP_OPENAT issue handler: openat(2) is openat2(2) with the
 * open_how already built at prep time, so share the implementation.
 */
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}
0167 
0168 void io_open_cleanup(struct io_kiocb *req)
0169 {
0170     struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
0171 
0172     if (open->filename)
0173         putname(open->filename);
0174 }
0175 
/*
 * Remove the file at @offset from the ring's fixed file table, taking
 * the submission lock as dictated by @issue_flags. Returns the result
 * of io_fixed_fd_remove().
 */
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int err;

	io_ring_submit_lock(ctx, issue_flags);
	err = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return err;
}
0187 
/* Close a fixed file: sqe->file_index is slot + 1, convert to 0-based. */
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}
0194 
/*
 * Prep an IORING_OP_CLOSE request: reject unused SQE fields, then read
 * either the regular fd or the fixed-file slot (+1) to close — exactly
 * one of the two may be set.
 */
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	/* the close target cannot itself be a fixed file */
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	/* closing a normal fd and a fixed slot at once makes no sense */
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}
0211 
/*
 * Issue an IORING_OP_CLOSE request. Either removes a fixed-file slot,
 * or detaches and closes a regular fd from the task's file table. If
 * the file has a ->flush() op and we're in nonblock context, return
 * -EAGAIN so the potentially-blocking flush runs from async context.
 */
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	/* file_lock held while we peek at the entry and decide how to close */
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
			lockdep_is_held(&files->file_lock));
	/* don't allow closing an io_uring instance fd through io_uring */
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	/* detach the fd from the table; we now own the file reference */
	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}