// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
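
/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 */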
struct scatterlist *sg_next(struct scatterlist *sg)
{
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);
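
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Determines how many entries are in @sg, taking chaining into account.
 */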
int sg_nents(struct scatterlist *sg)
{
        int nents;

        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);
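
/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 */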
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
        int nents;
        u64 total;

        if (!len)
                return 0;

        for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
                nents++;
                total += sg->length;
                if (total >= len)
                        return nents;
        }

        return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
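
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl pointer passed in need not be the first one,
 *   the important bit is that @nents denotes the number of entries that
 *   exist from @sgl.
 */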
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

        BUG_ON(!sg_is_last(ret));
        return ret;
}
EXPORT_SYMBOL(sg_last);
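
/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 */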
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
        sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);
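
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 */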
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
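
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */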
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
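                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw sense) and thus we can't rely on
                 * kmemleak code to track them. Instead, use kmemleak_alloc()
                 * and kmemleak_free() to track a memory block.
                 */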
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc_array(nents, sizeof(struct scatterlist),
                                     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}
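
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:	Free function
 * @num_ents:	Number of entries in the table
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 */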
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     unsigned int nents_first_chunk, sg_free_fn *free_fn,
                     unsigned int num_ents)
{
        struct scatterlist *sgl, *next;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (num_ents) {
                unsigned int alloc_size = num_ents;
                unsigned int sg_size;
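
                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */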
                if (alloc_size > curr_max_ents) {
                        next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                num_ents -= sg_size;
                if (nents_first_chunk)
                        nents_first_chunk = 0;
                else
                        free_fn(sgl, alloc_size);
                sgl = next;
                curr_max_ents = max_ents;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
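
/**
 * sg_free_append_table - Free a previously allocated append sg table.
 * @table:	The mapped sg append table header
 */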
void sg_free_append_table(struct sg_append_table *table)
{
        __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
                        table->total_nents);
}
EXPORT_SYMBOL(sg_free_append_table);
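
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 */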
void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
                        table->orig_nents);
}
EXPORT_SYMBOL(sg_free_table);
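
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL if preallocated (may be %NULL)
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 */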
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, struct scatterlist *first_chunk,
                     unsigned int nents_first_chunk, gfp_t gfp_mask,
                     sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;
        unsigned prv_max_ents;

        memset(table, 0, sizeof(*table));

        if (nents == 0)
                return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
        if (WARN_ON_ONCE(nents > max_ents))
                return -EINVAL;
#endif

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > curr_max_ents) {
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                if (first_chunk) {
                        sg = first_chunk;
                        first_chunk = NULL;
                } else {
                        sg = alloc_fn(alloc_size, gfp_mask);
                }
                if (unlikely(!sg)) {
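                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage.  Without this, sg_kfree() may get
                         * confused.
                         */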
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;
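
                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */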
                if (prv)
                        sg_chain(prv, prv_max_ents, sg);
                else
                        table->sgl = sg;
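
                /*
                 * If no more entries after this one, mark the end
                 */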
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
                prv_max_ents = curr_max_ents;
                curr_max_ents = max_ents;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
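
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 */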
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               NULL, 0, gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                sg_free_table(table);
        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
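
/*
 * Return the next usable entry after @cur, allocating and chaining a new
 * scatterlist chunk onto the append table when the current array runs out.
 */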
static struct scatterlist *get_next_sg(struct sg_append_table *table,
                                       struct scatterlist *cur,
                                       unsigned long needed_sges,
                                       gfp_t gfp_mask)
{
        struct scatterlist *new_sg, *next_sg;
        unsigned int alloc_size;

        if (cur) {
                next_sg = sg_next(cur);
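                /* Check if the last entry should be kept for chaining */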
                if (!sg_is_last(next_sg) || needed_sges == 1)
                        return next_sg;
        }

        alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
        new_sg = sg_kmalloc(alloc_size, gfp_mask);
        if (!new_sg)
                return ERR_PTR(-ENOMEM);
        sg_init_table(new_sg, alloc_size);
        if (cur) {
                table->total_nents += alloc_size - 1;
                __sg_chain(next_sg, new_sg);
        } else {
                table->sgt.sgl = new_sg;
                table->total_nents = alloc_size;
        }
        return new_sg;
}
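
/**
 * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
 *                                    table from an array of pages
 * @sgt_append:  The sg append table to use
 * @pages:       Pointer to an array of page pointers
 * @n_pages:     Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @left_pages:  Number of pages left to append after this call
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   On the first call it allocates and initializes an sg table from a list
 *   of pages; on later calls it reuses the scatterlist from @sgt_append.
 *   Contiguous ranges of the pages are squashed into a single scatterlist
 *   entry up to the maximum size specified in @max_segment.  A user may
 *   provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array.  The returned sg table is released by
 *   sg_free_append_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */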
int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
                struct page **pages, unsigned int n_pages, unsigned int offset,
                unsigned long size, unsigned int max_segment,
                unsigned int left_pages, gfp_t gfp_mask)
{
        unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
        unsigned int added_nents = 0;
        struct scatterlist *s = sgt_append->prv;
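
        /*
         * The algorithm below requires max_segment to be aligned to PAGE_SIZE
         * otherwise it can overshoot.
         */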
        max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
        if (WARN_ON(max_segment < PAGE_SIZE))
                return -EINVAL;

        if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
                return -EOPNOTSUPP;

        if (sgt_append->prv) {
                unsigned long paddr =
                        (page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE +
                         sgt_append->prv->offset + sgt_append->prv->length) /
                        PAGE_SIZE;

                if (WARN_ON(offset))
                        return -EINVAL;
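
                /* Merge contiguous pages into the last SG */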
                prv_len = sgt_append->prv->length;
                while (n_pages && page_to_pfn(pages[0]) == paddr) {
                        if (sgt_append->prv->length + PAGE_SIZE > max_segment)
                                break;
                        sgt_append->prv->length += PAGE_SIZE;
                        paddr++;
                        pages++;
                        n_pages--;
                }
                if (!n_pages)
                        goto out;
        }
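
        /* compute number of contiguous chunks */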
        chunks = 1;
        seg_len = 0;
        for (i = 1; i < n_pages; i++) {
                seg_len += PAGE_SIZE;
                if (seg_len >= max_segment ||
                    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
                        chunks++;
                        seg_len = 0;
                }
        }
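
        /* merging chunks and putting them into the scatterlist */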
        cur_page = 0;
        for (i = 0; i < chunks; i++) {
                unsigned int j, chunk_size;
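
                /* look for the end of the current chunk */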
                seg_len = 0;
                for (j = cur_page + 1; j < n_pages; j++) {
                        seg_len += PAGE_SIZE;
                        if (seg_len >= max_segment ||
                            page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;
                }
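
                /* Pass how many chunks might be left */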
                s = get_next_sg(sgt_append, s, chunks - i + left_pages,
                                gfp_mask);
                if (IS_ERR(s)) {
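                        /*
                         * Adjust entry length to be as before function was
                         * called.
                         */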
                        if (sgt_append->prv)
                                sgt_append->prv->length = prv_len;
                        return PTR_ERR(s);
                }
                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page],
                            min_t(unsigned long, size, chunk_size), offset);
                added_nents++;
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }
        sgt_append->sgt.nents += added_nents;
        sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
        sgt_append->prv = s;
out:
        if (!left_pages)
                sg_mark_end(s);
        return 0;
}
EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
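
/**
 * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table
 *                                     from an array of pages and given
 *                                     maximum segment.
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a single scatterlist element in bytes
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to
 *   the maximum size specified in @max_segment. A user may provide an
 *   offset at a start and a size of valid data in a buffer specified by
 *   the page array.
 *
 *   The returned sg table is released by sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */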
int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
                                unsigned int n_pages, unsigned int offset,
                                unsigned long size, unsigned int max_segment,
                                gfp_t gfp_mask)
{
        struct sg_append_table append = {};
        int err;

        err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
                                               size, max_segment, 0, gfp_mask);
        if (err) {
                sg_free_append_table(&append);
                return err;
        }
        memcpy(sgt, &append.sgt, sizeof(*sgt));
        WARN_ON(append.total_nents != sgt->orig_nents);
        return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);

#ifdef CONFIG_SGL_ALLOC
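
/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */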
struct scatterlist *sgl_alloc_order(unsigned long long length,
                                    unsigned int order, bool chainable,
                                    gfp_t gfp, unsigned int *nent_p)
{
        struct scatterlist *sgl, *sg;
        struct page *page;
        unsigned int nent, nalloc;
        u32 elem_len;

        nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
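        /* Check for integer overflow */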
        if (length > (nent << (PAGE_SHIFT + order)))
                return NULL;
        nalloc = nent;
        if (chainable) {
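                /* Check for integer overflow */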
                if (nalloc + 1 < nalloc)
                        return NULL;
                nalloc++;
        }
        sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
                            gfp & ~GFP_DMA);
        if (!sgl)
                return NULL;

        sg_init_table(sgl, nalloc);
        sg = sgl;
        while (length) {
                elem_len = min_t(u64, length, PAGE_SIZE << order);
                page = alloc_pages(gfp, order);
                if (!page) {
                        sgl_free_order(sgl, order);
                        return NULL;
                }

                sg_set_page(sg, page, elem_len, 0);
                length -= elem_len;
                sg = sg_next(sg);
        }
        WARN_ONCE(length, "length = %llu\n", length);
        if (nent_p)
                *nent_p = nent;
        return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);
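
/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */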
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
                              unsigned int *nent_p)
{
        return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
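
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting
 *   @nents to a high number.
 */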
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
        struct scatterlist *sg;
        struct page *page;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (!sg)
                        break;
                page = sg_page(sg);
                if (page)
                        __free_pages(page, order);
        }
        kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);
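
/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */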
void sgl_free_order(struct scatterlist *sgl, int order)
{
        sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);
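
/**
 * sgl_free - free a scatterlist
 * @sgl: Scatterlist with one or more elements
 */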
void sgl_free(struct scatterlist *sgl)
{
        sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */
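
/*
 * Start a page iterator over @sglist; iteration begins @pgoffset pages in
 * and walks at most @nents entries.
 */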
void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset)
{
        piter->__pg_advance = 0;
        piter->__nents = nents;

        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);
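
/* Number of pages this sg entry spans, including its offset. */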
static int sg_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}
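
/*
 * Advance the page iterator to the next page, crossing into the next sg
 * entry when needed.  Returns false once the list is exhausted.
 */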
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
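
/* Number of pages spanned by the DMA-mapped length of an sg entry. */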
static int sg_dma_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}
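
/*
 * DMA flavour of __sg_page_iter_next(): walks sg entries by their
 * DMA-mapped length rather than their CPU length.
 */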
bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
        struct sg_page_iter *piter = &dma_iter->base;

        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);
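
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */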
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        __sg_page_iter_start(&miter->piter, sgl, nents, 0);
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);
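
/*
 * If the current page has been fully consumed, advance the underlying page
 * iterator and recompute __offset and __remaining for the new page.
 */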
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
        if (!miter->__remaining) {
                struct scatterlist *sg;

                if (!__sg_page_iter_next(&miter->piter))
                        return false;

                sg = miter->piter.sg;

                miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
                miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
                miter->__offset &= PAGE_SIZE - 1;
                miter->__remaining = sg->offset + sg->length -
                                     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
                                     miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }

        return true;
}
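
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */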
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
        sg_miter_stop(miter);

        while (offset) {
                off_t consumed;

                if (!sg_miter_get_next_page(miter))
                        return false;

                consumed = min_t(off_t, offset, miter->__remaining);
                miter->__offset += consumed;
                miter->__remaining -= consumed;
                offset -= consumed;
        }

        return true;
}
EXPORT_SYMBOL(sg_miter_skip);
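
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */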
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        sg_miter_stop(miter);
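
        /*
         * Get to the next page if necessary.
         * __remaining, __offset is adjusted by sg_miter_stop
         */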
        if (!sg_miter_get_next_page(miter))
                return false;

        miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + miter->__offset;
        else
                miter->addr = kmap(miter->page) + miter->__offset;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);
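
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Don't care otherwise.
 */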
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);
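
        /* drop resources from the last iteration */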
        if (miter->addr) {
                miter->__offset += miter->consumed;
                miter->__remaining -= miter->consumed;

                if (miter->__flags & SG_MITER_TO_SG)
                        flush_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON_ONCE(!pagefault_disabled());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);
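
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	The linear buffer
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 * @to_buffer:	transfer direction (true == from an sg list to a
 *		buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 */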
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
                      size_t buflen, off_t skip, bool to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while ((offset < buflen) && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
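
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 */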
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           const void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
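
/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 */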
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
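
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 */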
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            const void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
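
/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 */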
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
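
/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buflen:	The number of bytes to zero out
 * @skip:	Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 */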
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
                      size_t buflen, off_t skip)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while (offset < buflen && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);
                memset(miter.addr, 0, len);

                offset += len;
        }

        sg_miter_stop(&miter);
        return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);