/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  Berkeley style UIO structures   -   Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
    void *iov_base; /* and that should *never* hold a userland pointer */
    size_t iov_len;
};

enum iter_type {
    /* iter types */
    ITER_IOVEC,
    ITER_KVEC,
    ITER_BVEC,
    ITER_PIPE,
    ITER_XARRAY,
    ITER_DISCARD,
    ITER_UBUF,
};

struct iov_iter_state {
    size_t iov_offset;
    size_t count;
    unsigned long nr_segs;
};

struct iov_iter {
    u8 iter_type;
    bool nofault;
    bool data_source;
    bool user_backed;
    union {
        size_t iov_offset;
        int last_offset;
    };
    size_t count;
    union {
        const struct iovec *iov;
        const struct kvec *kvec;
        const struct bio_vec *bvec;
        struct xarray *xarray;
        struct pipe_inode_info *pipe;
        void __user *ubuf;
    };
    union {
        unsigned long nr_segs;
        struct {
            unsigned int head;
            unsigned int start_head;
        };
        loff_t xarray_start;
    };
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
    return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
                       struct iov_iter_state *state)
{
    state->iov_offset = iter->iov_offset;
    state->count = iter->count;
    state->nr_segs = iter->nr_segs;
}
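
/*
 * Usage sketch (illustrative, not part of the original header): pairing
 * iov_iter_save_state() with iov_iter_restore(), declared further down,
 * to rewind an iterator after a short copy and retry via a slower path.
 * 'buf' and 'len' are hypothetical.
 *
 *	struct iov_iter_state state;
 *	size_t copied;
 *
 *	iov_iter_save_state(iter, &state);
 *	copied = copy_from_iter(buf, len, iter);
 *	if (copied != len)
 *		iov_iter_restore(iter, &state);
 */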

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
    return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
    return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
    return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
    return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
    return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
    return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
    return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
    return i->data_source ? WRITE : READ;
}
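
/*
 * Direction semantics, for reference: an iterator created with WRITE
 * describes the source of a transfer (its buffers are read from, as in
 * write(2)), so data_source is set; one created with READ describes the
 * destination (its buffers are filled, as in read(2)).
 */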

static inline bool user_backed_iter(const struct iov_iter *i)
{
    return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
    unsigned long seg;
    size_t ret = 0;

    for (seg = 0; seg < nr_segs; seg++)
        ret += iov[seg].iov_len;
    return ret;
}

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
    return (struct iovec) {
        .iov_base = iter->iov->iov_base + iter->iov_offset,
        .iov_len = min(iter->count,
                   iter->iov->iov_len - iter->iov_offset),
    };
}

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
                  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
             struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
             struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
        size_t bytes, struct iov_iter *i)
{
    return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
    if (check_copy_size(addr, bytes, true))
        return _copy_to_iter(addr, bytes, i);
    return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
    if (check_copy_size(addr, bytes, false))
        return _copy_from_iter(addr, bytes, i);
    return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
    size_t copied = copy_from_iter(addr, bytes, i);
    if (likely(copied == bytes))
        return true;
    iov_iter_revert(i, copied);
    return false;
}
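
/*
 * Usage sketch (illustrative): pulling a fixed-size header out of an
 * iterator with all-or-nothing semantics.  'struct foo_hdr' is
 * hypothetical; on a short copy the iterator has already been reverted,
 * so the caller can simply fail.
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 */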

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
    if (check_copy_size(addr, bytes, false))
        return _copy_from_iter_nocache(addr, bytes, i);
    return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
    size_t copied = copy_from_iter_nocache(addr, bytes, i);
    if (likely(copied == bytes))
        return true;
    iov_iter_revert(i, copied);
    return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() (relative to _copy_from_iter_nocache())
 * must check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before
 * assuming that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
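
/*
 * Usage sketch (illustrative) of the check described above: a pmem-style
 * consumer deciding whether the copy itself already flushed the
 * destination.  'flush_dst_explicitly()' is a hypothetical stand-in for
 * whatever explicit cache flush the caller would otherwise issue.
 *
 *	len = _copy_from_iter_flushcache(dst, bytes, iter);
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		flush_dst_explicitly(dst, len);
 */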

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
            unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
            unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
            unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
            unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
            size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
             loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
            size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
            size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
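
/*
 * Usage sketch (illustrative): describing two kernel buffers with kvecs
 * and initializing an iterator over them as the source of a transfer
 * (WRITE).  'hdr', 'body' and their lengths are hypothetical.
 *
 *	struct kvec vec[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE, vec, 2, hdr_len + body_len);
 */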

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
    return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
    /*
     * count doesn't have to fit in size_t - comparison extends both
     * operands to u64 here and any value that would be truncated by
     * conversion in assignment is by definition greater than all
     * values of size_t, including old i->count.
     */
    if (i->count > count)
        i->count = count;
}

/*
 * Reexpand a previously truncated iterator; the new count must be no
 * larger than the iterator's count before it was truncated.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
    i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
    size_t shorted = 0;
    int npages;

    if (iov_iter_count(i) > max_bytes) {
        shorted = iov_iter_count(i) - max_bytes;
        iov_iter_truncate(i, max_bytes);
    }
    npages = iov_iter_npages(i, INT_MAX);
    if (shorted)
        iov_iter_reexpand(i, iov_iter_count(i) + shorted);

    return npages;
}

struct csum_state {
    __wsum csum;
    size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
                  __wsum *csum, struct iov_iter *i)
{
    size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
    if (likely(copied == bytes))
        return true;
    iov_iter_revert(i, copied);
    return false;
}

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
        struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
        unsigned long nr_segs, unsigned long fast_segs,
        struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
         unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
         struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
         unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
         struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
         struct iovec *iov, struct iov_iter *i);
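
/*
 * Usage sketch (illustrative): a readv()-style path importing a user
 * iovec array.  import_iovec() sets *iovp to NULL when the on-stack
 * fast array was sufficient, so the unconditional kfree() below is
 * safe.  'uvec', 'nr_segs' and do_the_read() are hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_read(&iter);
 *	kfree(iov);
 */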

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
            void __user *buf, size_t count)
{
    WARN_ON(direction & ~(READ | WRITE));
    *i = (struct iov_iter) {
        .iter_type = ITER_UBUF,
        .user_backed = true,
        .data_source = direction,
        .ubuf = buf,
        .count = count
    };
}
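
/*
 * Usage sketch (illustrative): wrapping a single user buffer as the
 * destination of a transfer (READ), after which copy_to_iter() fills
 * it.  'buf' and 'len' are hypothetical.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_ubuf(&iter, READ, buf, len);
 */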

#endif /* __LINUX_UIO_H */