// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base;			\
			size_t len;				\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
					   unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->last_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = pipe_buf(pipe, i_head);
		if (unlikely(p->offset + p->len != abs(i->last_offset)))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
	struct page *page = alloc_page(GFP_USER);
	if (page) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
		*buf = (struct pipe_buffer) {
			.ops = &default_pipe_buf_ops,
			.page = page,
			.offset = 0,
			.len = size
		};
	}
	return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
			unsigned int offset, unsigned int size)
{
	struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
	*buf = (struct pipe_buffer) {
		.ops = &page_cache_pipe_buf_ops,
		.page = page,
		.offset = offset,
		.len = size
	};
	get_page(page);
}

static inline int last_offset(const struct pipe_buffer *buf)
{
	if (buf->ops == &default_pipe_buf_ops)
		return buf->len;	// buf->offset is 0 for those
	else
		return -(buf->offset + buf->len);
}

static struct page *append_pipe(struct iov_iter *i, size_t size,
				unsigned int *off)
{
	struct pipe_inode_info *pipe = i->pipe;
	int offset = i->last_offset;
	struct pipe_buffer *buf;
	struct page *page;

	if (offset > 0 && offset < PAGE_SIZE) {
		// some space in the last buffer; we can add to it
		buf = pipe_buf(pipe, pipe->head - 1);
		size = min_t(size_t, size, PAGE_SIZE - offset);
		buf->len += size;
		i->last_offset += size;
		i->count -= size;
		*off = offset;
		return buf->page;
	}
	// OK, we need a new buffer
	*off = 0;
	size = min_t(size_t, size, PAGE_SIZE);
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return NULL;
	page = push_anon(pipe, size);
	if (!page)
		return NULL;
	i->head = pipe->head - 1;
	i->last_offset = size;
	i->count -= size;
	return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int head = pipe->head;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	if (offset && i->last_offset == -offset) { // could we merge it?
		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
		if (buf->page == page) {
			buf->len += bytes;
			i->last_offset -= bytes;
			i->count -= bytes;
			return bytes;
		}
	}
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return 0;

	push_page(pipe, page, offset, bytes);
	i->last_offset = -(offset + bytes);
	i->head = head;
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
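/*
 * Editor's note: a minimal usage sketch (hypothetical caller, not part of
 * this file).  Buffered-write paths typically pre-fault the source buffer
 * before taking page locks, and give up if nothing could be faulted in:
 *
 *	if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
 *		status = -EFAULT;
 *		break;
 *	}
 */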

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
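/*
 * Editor's note: a minimal sketch of filling a user-backed iterator
 * (hypothetical caller; 'ubuf', 'kbuf' and 'len' are assumptions).  A READ
 * iterator describes a destination, so it is filled with copy_to_iter():
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);	// may be short on fault
 */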

// returns the offset in partial buffer (if any)
static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
{
	struct pipe_inode_info *pipe = i->pipe;
	int used = pipe->head - pipe->tail;
	int off = i->last_offset;

	*npages = max((int)pipe->max_usage - used, 0);

	if (off > 0 && off < PAGE_SIZE) { // anon and not full
		(*npages)++;
		return off;
	}
	return 0;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		if (!page)
			return bytes - n;
		memcpy_to_page(page, off, addr, chunk);
		addr += chunk;
	}
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int chunk, r;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &r);
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		p = kmap_local_page(page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		off += chunk;
		bytes -= chunk;
	}
	*sump = sum;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	size_t xfer = 0;
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &off);
		unsigned long rem;
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
		p = kmap_local_page(page);
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		xfer += chunk;
		bytes -= chunk;
		if (rem) {
			iov_iter_revert(i, rem);
			break;
		}
	}
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and the typical _copy_to_iter():
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only guarantees
 * byte-by-byte flushing for the ITER_KVEC and ITER_BVEC cases due to its
 * instruction-by-instruction method for cache loops.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	page += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
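/*
 * Editor's note: typical read-side use (a sketch, hypothetical caller):
 * copy part of a page-cache page into whatever the iterator describes,
 * treating a short copy with bytes still left in the iterator as a fault:
 *
 *	copied = copy_page_to_iter(page, offset, count, iter);
 *	if (copied < count && iov_iter_count(iter))
 *		return -EFAULT;
 */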

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	unsigned int chunk, off;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		char *p;

		if (!page)
			return bytes - n;
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		p = kmap_local_page(page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
	}
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	int off = i->last_offset;

	if (!off && !size) {
		pipe_discard_from(pipe, i->start_head); // discard everything
		return;
	}
	i->count -= size;
	while (1) {
		struct pipe_buffer *buf = pipe_buf(pipe, i->head);
		if (off) /* make it relative to the beginning of buffer */
			size += abs(off) - buf->offset;
		if (size <= buf->len) {
			buf->len = size;
			i->last_offset = last_offset(buf);
			break;
		}
		size -= buf->len;
		i->head++;
		off = 0;
	}
	pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
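/*
 * Editor's note: advance and revert are meant to be paired when a consumer
 * turns out to have used less than it advanced (sketch, hypothetical caller;
 * do_something() is made up):
 *
 *	iov_iter_advance(iter, chunk);			// claim the space
 *	used = do_something(iter);
 *	if (used < chunk)
 *		iov_iter_revert(iter, chunk - used);	// give back the tail
 */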

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int head = pipe->head;

		while (head > i->start_head) {
			struct pipe_buffer *b = pipe_buf(pipe, --head);
			if (unroll < b->len) {
				b->len -= unroll;
				i->last_offset = last_offset(b);
				i->head = head;
				return;
			}
			unroll -= b->len;
			pipe_buf_release(pipe, b);
			pipe->head--;
		}
		i->last_offset = 0;
		i->head = head;
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);
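/*
 * Editor's note: kernel-buffer sketch (hypothetical caller).  A WRITE
 * iterator describes a data source, so it is drained with copy_from_iter():
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE, &kv, 1, len);
 *	n = copy_from_iter(dst, len, &iter);
 */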

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);
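/*
 * Editor's note: page-based sketch (hypothetical caller).  The bio_vec
 * array backs the iterator, so it must stay alive while the iterator is
 * in use:
 *
 *	struct bio_vec bv = {
 *		.bv_page	= page,
 *		.bv_len		= PAGE_SIZE,
 *		.bv_offset	= 0,
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);
 */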

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.last_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths do not meet the alignment
 *	requirements; true otherwise.
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size & len_mask)
			return false;
		if (size && i->last_offset > 0) {
			if (i->last_offset & addr_mask)
				return false;
		}

		return true;
	}

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size && i->last_offset > 0)
			return size | i->last_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
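/*
 * Editor's note: direct-I/O style sketch (hypothetical caller): reject a
 * request whose segment addresses or lengths are not block-aligned:
 *
 *	if (iov_iter_alignment(iter) & (bdev_logical_block_size(bdev) - 1))
 *		return -EINVAL;
 */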

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page ***pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int npages, count, off, chunk;
	struct page **p;
	size_t left;

	if (!sanity(i))
		return -EFAULT;

	*start = off = pipe_npages(i, &npages);
	if (!npages)
		return -EFAULT;
	count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
	if (!count)
		return -ENOMEM;
	p = *pages;
	for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
		struct page *page = append_pipe(i, left, &off);
		if (!page)
			break;
		chunk = min_t(size_t, left, PAGE_SIZE - off);
		get_page(*p++ = page);
	}
	if (!npages)
		return -EFAULT;
	return maxsize - left;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)i->iov[k].iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start)
{
	unsigned int n;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);
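/*
 * Editor's note: a sketch of grabbing pages for I/O (hypothetical caller).
 * On success the iterator has been advanced past the returned range, and
 * every returned page holds a reference the caller must drop:
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages2(iter, pages, bytes, ARRAY_SIZE(pages), &off);
 *	if (n < 0)
 *		return n;
 *	// data starts at 'off' within pages[0]; put_page() each page when done
 */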

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		int npages;

		if (!sanity(i))
			return 0;

		pipe_npages(i, &npages);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
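/*
 * Editor's note: syscall-style sketch (hypothetical caller; do_the_io() is
 * made up).  *iovp may be replaced by a heap allocation, and kfree() of the
 * result is safe either way:
 *
 *	struct iovec stack[UIO_FASTIOV], *iov = stack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(stack), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);
 *	kfree(iov);		// NULL when the stack array was used
 */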

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
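/*
 * Editor's note: single-buffer sketch (hypothetical caller).  The iovec
 * passed in backs the resulting iterator, so it must outlive it:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *
 *	if (import_single_range(WRITE, ubuf, len, &iov, &iter))
 *		return -EFAULT;
 */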

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs and includes some
	 * of the same fields, so the size check is handy for these.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}