Back to home page

LXR

 
 

    


0001 /*
0002  * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
0003  *
0004  * Scatterlist handling helpers.
0005  *
0006  * This source code is licensed under the GNU General Public License,
0007  * Version 2. See the file COPYING for more details.
0008  */
0009 #include <linux/export.h>
0010 #include <linux/slab.h>
0011 #include <linux/scatterlist.h>
0012 #include <linux/highmem.h>
0013 #include <linux/kmemleak.h>
0014 
0015 /**
0016  * sg_next - return the next scatterlist entry in a list
0017  * @sg:     The current sg entry
0018  *
0019  * Description:
0020  *   Usually the next entry will be @sg@ + 1, but if this sg element is part
0021  *   of a chained scatterlist, it could jump to the start of a new
0022  *   scatterlist array.
0023  *
0024  **/
0025 struct scatterlist *sg_next(struct scatterlist *sg)
0026 {
0027 #ifdef CONFIG_DEBUG_SG
0028     BUG_ON(sg->sg_magic != SG_MAGIC);
0029 #endif
0030     if (sg_is_last(sg))
0031         return NULL;
0032 
0033     sg++;
0034     if (unlikely(sg_is_chain(sg)))
0035         sg = sg_chain_ptr(sg);
0036 
0037     return sg;
0038 }
0039 EXPORT_SYMBOL(sg_next);
0040 
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:     The scatterlist
 *
 * Description:
 * Walks @sg entry by entry via sg_next(), so chained scatterlists are
 * counted correctly (chain link entries themselves are skipped by
 * sg_next() and therefore not counted).
 *
 **/
int sg_nents(struct scatterlist *sg)
{
    int nents = 0;

    while (sg) {
        nents++;
        sg = sg_next(sg);
    }

    return nents;
}
EXPORT_SYMBOL(sg_nents);
0058 
0059 /**
0060  * sg_nents_for_len - return total count of entries in scatterlist
0061  *                    needed to satisfy the supplied length
0062  * @sg:     The scatterlist
0063  * @len:    The total required length
0064  *
0065  * Description:
0066  * Determines the number of entries in sg that are required to meet
0067  * the supplied length, taking into acount chaining as well
0068  *
0069  * Returns:
0070  *   the number of sg entries needed, negative error on failure
0071  *
0072  **/
0073 int sg_nents_for_len(struct scatterlist *sg, u64 len)
0074 {
0075     int nents;
0076     u64 total;
0077 
0078     if (!len)
0079         return 0;
0080 
0081     for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
0082         nents++;
0083         total += sg->length;
0084         if (total >= len)
0085             return nents;
0086     }
0087 
0088     return -EINVAL;
0089 }
0090 EXPORT_SYMBOL(sg_nents_for_len);
0091 
0092 /**
0093  * sg_last - return the last scatterlist entry in a list
0094  * @sgl:    First entry in the scatterlist
0095  * @nents:  Number of entries in the scatterlist
0096  *
0097  * Description:
0098  *   Should only be used casually, it (currently) scans the entire list
0099  *   to get the last entry.
0100  *
0101  *   Note that the @sgl@ pointer passed in need not be the first one,
0102  *   the important bit is that @nents@ denotes the number of entries that
0103  *   exist from @sgl@.
0104  *
0105  **/
0106 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
0107 {
0108     struct scatterlist *sg, *ret = NULL;
0109     unsigned int i;
0110 
0111     for_each_sg(sgl, sg, nents, i)
0112         ret = sg;
0113 
0114 #ifdef CONFIG_DEBUG_SG
0115     BUG_ON(sgl[0].sg_magic != SG_MAGIC);
0116     BUG_ON(!sg_is_last(ret));
0117 #endif
0118     return ret;
0119 }
0120 EXPORT_SYMBOL(sg_last);
0121 
0122 /**
0123  * sg_init_table - Initialize SG table
0124  * @sgl:       The SG table
0125  * @nents:     Number of entries in table
0126  *
0127  * Notes:
0128  *   If this is part of a chained sg table, sg_mark_end() should be
0129  *   used only on the last table part.
0130  *
0131  **/
0132 void sg_init_table(struct scatterlist *sgl, unsigned int nents)
0133 {
0134     memset(sgl, 0, sizeof(*sgl) * nents);
0135 #ifdef CONFIG_DEBUG_SG
0136     {
0137         unsigned int i;
0138         for (i = 0; i < nents; i++)
0139             sgl[i].sg_magic = SG_MAGIC;
0140     }
0141 #endif
0142     sg_mark_end(&sgl[nents - 1]);
0143 }
0144 EXPORT_SYMBOL(sg_init_table);
0145 
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:      SG entry
 * @buf:     Virtual address for IO
 * @buflen:  IO length
 *
 * Description:
 *   Convenience wrapper: initializes @sg as a one-entry list (end marker
 *   set by sg_init_table()) and points it at @buf/@buflen via sg_set_buf().
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
    sg_init_table(sg, 1);
    sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
0159 
0160 /*
0161  * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
0162  * helpers.
0163  */
0164 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
0165 {
0166     if (nents == SG_MAX_SINGLE_ALLOC) {
0167         /*
0168          * Kmemleak doesn't track page allocations as they are not
0169          * commonly used (in a raw form) for kernel data structures.
0170          * As we chain together a list of pages and then a normal
0171          * kmalloc (tracked by kmemleak), in order to for that last
0172          * allocation not to become decoupled (and thus a
0173          * false-positive) we need to inform kmemleak of all the
0174          * intermediate allocations.
0175          */
0176         void *ptr = (void *) __get_free_page(gfp_mask);
0177         kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
0178         return ptr;
0179     } else
0180         return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
0181 }
0182 
0183 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
0184 {
0185     if (nents == SG_MAX_SINGLE_ALLOC) {
0186         kmemleak_free(sg);
0187         free_page((unsigned long) sg);
0188     } else
0189         kfree(sg);
0190 }
0191 
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:  The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:    Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
             bool skip_first_chunk, sg_free_fn *free_fn)
{
    struct scatterlist *sgl, *next;

    /* Nothing allocated (or already freed): done. */
    if (unlikely(!table->sgl))
        return;

    sgl = table->sgl;
    while (table->orig_nents) {
        unsigned int alloc_size = table->orig_nents;
        unsigned int sg_size;

        /*
         * If we have more than max_ents segments left,
         * then assign 'next' to the sg table after the current one.
         * sg_size is then one less than alloc size, since the last
         * element is the chain pointer.
         */
        if (alloc_size > max_ents) {
            /* Read the chain target before freeing this chunk. */
            next = sg_chain_ptr(&sgl[max_ents - 1]);
            alloc_size = max_ents;
            sg_size = alloc_size - 1;
        } else {
            sg_size = alloc_size;
            next = NULL;
        }

        table->orig_nents -= sg_size;
        /* The first chunk may be caller-owned; skip it exactly once. */
        if (skip_first_chunk)
            skip_first_chunk = false;
        else
            free_fn(sgl, alloc_size);
        sgl = next;
    }

    table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
0244 
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:  The mapped sg table header
 *
 * Description:
 *   Counterpart to sg_alloc_table(): releases every chunk through the
 *   default sg_kfree() helper.  Safe to call when @table->sgl is NULL
 *   (__sg_free_table() returns early in that case).
 *
 **/
void sg_free_table(struct sg_table *table)
{
    __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
0255 
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:  The sg table header to use
 * @nents:  Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @first_chunk: first scatterlist chunk preallocated by the caller, or NULL
 * @gfp_mask:   GFP allocation mask
 * @alloc_fn:   Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
             unsigned int max_ents, struct scatterlist *first_chunk,
             gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
    struct scatterlist *sg, *prv;
    unsigned int left;

    memset(table, 0, sizeof(*table));

    if (nents == 0)
        return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
    /* Without arch chaining support, everything must fit in one chunk. */
    if (WARN_ON_ONCE(nents > max_ents))
        return -EINVAL;
#endif

    left = nents;
    prv = NULL;
    do {
        unsigned int sg_size, alloc_size = left;

        /*
         * A full-size chunk reserves its last entry for the chain
         * pointer to the next chunk, so it holds max_ents - 1
         * payload entries.
         */
        if (alloc_size > max_ents) {
            alloc_size = max_ents;
            sg_size = alloc_size - 1;
        } else
            sg_size = alloc_size;

        left -= sg_size;

        /* Use the caller-provided first chunk once, then allocate. */
        if (first_chunk) {
            sg = first_chunk;
            first_chunk = NULL;
        } else {
            sg = alloc_fn(alloc_size, gfp_mask);
        }
        if (unlikely(!sg)) {
            /*
             * Adjust entry count to reflect that the last
             * entry of the previous table won't be used for
             * linkage.  Without this, sg_kfree() may get
             * confused.
             */
            if (prv)
                table->nents = ++table->orig_nents;

            return -ENOMEM;
        }

        sg_init_table(sg, alloc_size);
        table->nents = table->orig_nents += sg_size;

        /*
         * If this is the first mapping, assign the sg table header.
         * If this is not the first mapping, chain previous part.
         */
        if (prv)
            sg_chain(prv, max_ents, sg);
        else
            table->sgl = sg;

        /*
         * If no more entries after this one, mark the end
         */
        if (!left)
            sg_mark_end(&sg[sg_size - 1]);

        prv = sg;
    } while (left);

    return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
0347 
0348 /**
0349  * sg_alloc_table - Allocate and initialize an sg table
0350  * @table:  The sg table header to use
0351  * @nents:  Number of entries in sg list
0352  * @gfp_mask:   GFP allocation mask
0353  *
0354  *  Description:
0355  *    Allocate and initialize an sg table. If @nents@ is larger than
0356  *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
0357  *
0358  **/
0359 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
0360 {
0361     int ret;
0362 
0363     ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
0364                    NULL, gfp_mask, sg_kmalloc);
0365     if (unlikely(ret))
0366         __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
0367 
0368     return ret;
0369 }
0370 EXPORT_SYMBOL(sg_alloc_table);
0371 
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                 an array of pages
 * @sgt:    The sg table header to use
 * @pages:  Pointer to an array of page pointers
 * @n_pages:    Number of pages in the pages array
 * @offset:     Offset from start of the first page to the start of a buffer
 * @size:       Number of valid bytes in the buffer (after offset)
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at a start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int n_pages,
    unsigned long offset, unsigned long size,
    gfp_t gfp_mask)
{
    unsigned int chunks;
    unsigned int i;
    unsigned int cur_page;
    int ret;
    struct scatterlist *s;

    /* compute number of contiguous chunks */
    chunks = 1;
    for (i = 1; i < n_pages; ++i)
        if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
            ++chunks;

    /* One scatterlist entry per physically contiguous run of pages. */
    ret = sg_alloc_table(sgt, chunks, gfp_mask);
    if (unlikely(ret))
        return ret;

    /* merging chunks and putting them into the scatterlist */
    cur_page = 0;
    for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
        unsigned long chunk_size;
        unsigned int j;

        /* look for the end of the current chunk */
        for (j = cur_page + 1; j < n_pages; ++j)
            if (page_to_pfn(pages[j]) !=
                page_to_pfn(pages[j - 1]) + 1)
                break;

        /*
         * Only the first chunk absorbs @offset; later iterations run
         * with offset == 0.  min(size, chunk_size) trims the final
         * entry when the buffer ends mid-chunk.
         * NOTE(review): assumes @offset < PAGE_SIZE and that @size
         * does not run past the page array — confirm with callers.
         */
        chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
        sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
        size -= chunk_size;
        offset = 0;
        cur_page = j;
    }

    return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
0435 
0436 void __sg_page_iter_start(struct sg_page_iter *piter,
0437               struct scatterlist *sglist, unsigned int nents,
0438               unsigned long pgoffset)
0439 {
0440     piter->__pg_advance = 0;
0441     piter->__nents = nents;
0442 
0443     piter->sg = sglist;
0444     piter->sg_pgoffset = pgoffset;
0445 }
0446 EXPORT_SYMBOL(__sg_page_iter_start);
0447 
/* Number of pages spanned by one sg entry (offset + length, page-aligned up). */
static int sg_page_count(struct scatterlist *sg)
{
    return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}
0452 
/*
 * __sg_page_iter_next - advance the page iterator by one page
 *
 * Returns false once @__nents entries (or the list itself) are exhausted.
 * The first call after __sg_page_iter_start() lands on the starting page
 * because __pg_advance is 0; subsequent calls step one page at a time.
 */
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
    if (!piter->__nents || !piter->sg)
        return false;

    /* Apply the step recorded by the previous call (0 on the first). */
    piter->sg_pgoffset += piter->__pg_advance;
    piter->__pg_advance = 1;

    /* Walk forward across entries until the page offset falls inside one. */
    while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
        piter->sg_pgoffset -= sg_page_count(piter->sg);
        piter->sg = sg_next(piter->sg);
        if (!--piter->__nents || !piter->sg)
            return false;
    }

    return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
0471 
0472 /**
0473  * sg_miter_start - start mapping iteration over a sg list
0474  * @miter: sg mapping iter to be started
0475  * @sgl: sg list to iterate over
0476  * @nents: number of sg entries
0477  *
0478  * Description:
0479  *   Starts mapping iterator @miter.
0480  *
0481  * Context:
0482  *   Don't care.
0483  */
0484 void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
0485             unsigned int nents, unsigned int flags)
0486 {
0487     memset(miter, 0, sizeof(struct sg_mapping_iter));
0488 
0489     __sg_page_iter_start(&miter->piter, sgl, nents, 0);
0490     WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
0491     miter->__flags = flags;
0492 }
0493 EXPORT_SYMBOL(sg_miter_start);
0494 
/*
 * sg_miter_get_next_page - ensure @miter points at a page with bytes left
 *
 * No-op if the current page window (__offset/__remaining) still has bytes.
 * Otherwise advances the underlying page iterator and recomputes the
 * window.  Returns false when the sg list is exhausted.
 */
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
    if (!miter->__remaining) {
        struct scatterlist *sg;
        unsigned long pgoffset;

        if (!__sg_page_iter_next(&miter->piter))
            return false;

        sg = miter->piter.sg;
        pgoffset = miter->piter.sg_pgoffset;

        /* Only the first page of an entry honours sg->offset. */
        miter->__offset = pgoffset ? 0 : sg->offset;
        /* Bytes of this entry that lie at/after the window start... */
        miter->__remaining = sg->offset + sg->length -
                (pgoffset << PAGE_SHIFT) - miter->__offset;
        /* ...clamped so the window never crosses a page boundary. */
        miter->__remaining = min_t(unsigned long, miter->__remaining,
                       PAGE_SIZE - miter->__offset);
    }

    return true;
}
0516 
0517 /**
0518  * sg_miter_skip - reposition mapping iterator
0519  * @miter: sg mapping iter to be skipped
0520  * @offset: number of bytes to plus the current location
0521  *
0522  * Description:
0523  *   Sets the offset of @miter to its current location plus @offset bytes.
0524  *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
0525  *   stops @miter.
0526  *
0527  * Context:
0528  *   Don't care if @miter is stopped, or not proceeded yet.
0529  *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
0530  *
0531  * Returns:
0532  *   true if @miter contains the valid mapping.  false if end of sg
0533  *   list is reached.
0534  */
0535 bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
0536 {
0537     sg_miter_stop(miter);
0538 
0539     while (offset) {
0540         off_t consumed;
0541 
0542         if (!sg_miter_get_next_page(miter))
0543             return false;
0544 
0545         consumed = min_t(off_t, offset, miter->__remaining);
0546         miter->__offset += consumed;
0547         miter->__remaining -= consumed;
0548         offset -= consumed;
0549     }
0550 
0551     return true;
0552 }
0553 EXPORT_SYMBOL(sg_miter_skip);
0554 
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
    sg_miter_stop(miter);

    /*
     * Get to the next page if necessary.
     * __remaining, __offset is adjusted by sg_miter_stop
     */
    if (!sg_miter_get_next_page(miter))
        return false;

    miter->page = sg_page_iter_page(&miter->piter);
    miter->consumed = miter->length = miter->__remaining;

    /* Atomic iteration maps with kmap_atomic; otherwise sleepable kmap. */
    if (miter->__flags & SG_MITER_ATOMIC)
        miter->addr = kmap_atomic(miter->page) + miter->__offset;
    else
        miter->addr = kmap(miter->page) + miter->__offset;

    return true;
}
EXPORT_SYMBOL(sg_miter_next);
0594 
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
    /* Callers may lower ->consumed to resume mid-window; never raise it. */
    WARN_ON(miter->consumed > miter->length);

    /* drop resources from the last iteration */
    if (miter->addr) {
        /* Advance the window by however much the caller consumed. */
        miter->__offset += miter->consumed;
        miter->__remaining -= miter->consumed;

        /* Writes to the sg need a dcache flush before unmap (not slab). */
        if ((miter->__flags & SG_MITER_TO_SG) &&
            !PageSlab(miter->page))
            flush_kernel_dcache_page(miter->page);

        if (miter->__flags & SG_MITER_ATOMIC) {
            /* Must still be in the atomic section opened by next(). */
            WARN_ON_ONCE(preemptible());
            kunmap_atomic(miter->addr);
        } else
            kunmap(miter->page);

        miter->page = NULL;
        miter->addr = NULL;
        miter->length = 0;
        miter->consumed = 0;
    }
}
EXPORT_SYMBOL(sg_miter_stop);
0635 
0636 /**
0637  * sg_copy_buffer - Copy data between a linear buffer and an SG list
0638  * @sgl:         The SG list
0639  * @nents:       Number of SG entries
0640  * @buf:         Where to copy from
0641  * @buflen:      The number of bytes to copy
0642  * @skip:        Number of bytes to skip before copying
0643  * @to_buffer:       transfer direction (true == from an sg list to a
0644  *           buffer, false == from a buffer to an sg list
0645  *
0646  * Returns the number of copied bytes.
0647  *
0648  **/
0649 size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
0650               size_t buflen, off_t skip, bool to_buffer)
0651 {
0652     unsigned int offset = 0;
0653     struct sg_mapping_iter miter;
0654     unsigned long flags;
0655     unsigned int sg_flags = SG_MITER_ATOMIC;
0656 
0657     if (to_buffer)
0658         sg_flags |= SG_MITER_FROM_SG;
0659     else
0660         sg_flags |= SG_MITER_TO_SG;
0661 
0662     sg_miter_start(&miter, sgl, nents, sg_flags);
0663 
0664     if (!sg_miter_skip(&miter, skip))
0665         return false;
0666 
0667     local_irq_save(flags);
0668 
0669     while (sg_miter_next(&miter) && offset < buflen) {
0670         unsigned int len;
0671 
0672         len = min(miter.length, buflen - offset);
0673 
0674         if (to_buffer)
0675             memcpy(buf + offset, miter.addr, len);
0676         else
0677             memcpy(miter.addr, buf + offset, len);
0678 
0679         offset += len;
0680     }
0681 
0682     sg_miter_stop(&miter);
0683 
0684     local_irq_restore(flags);
0685     return offset;
0686 }
0687 EXPORT_SYMBOL(sg_copy_buffer);
0688 
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:         The SG list
 * @nents:       Number of SG entries
 * @buf:         Where to copy from
 * @buflen:      The number of bytes to copy
 *
 * Description:
 *   Thin wrapper around sg_copy_buffer() with no skip, direction
 *   buffer -> sg list.  The const is cast away because sg_copy_buffer()
 *   takes a non-const pointer for both directions; @buf is only read here.
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
               const void *buf, size_t buflen)
{
    return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
0705 
/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:         The SG list
 * @nents:       Number of SG entries
 * @buf:         Where to copy to
 * @buflen:      The number of bytes to copy
 *
 * Description:
 *   Thin wrapper around sg_copy_buffer() with no skip, direction
 *   sg list -> buffer.
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
             void *buf, size_t buflen)
{
    return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
0722 
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:         The SG list
 * @nents:       Number of SG entries
 * @buf:         Where to copy from
 * @buflen:      The number of bytes to copy
 * @skip:        Number of bytes to skip before copying
 *
 * Description:
 *   Like sg_copy_from_buffer() but skips the first @skip bytes of the
 *   sg list before writing.  The const cast mirrors sg_copy_from_buffer();
 *   @buf is only read.
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                const void *buf, size_t buflen, off_t skip)
{
    return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
0740 
/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:         The SG list
 * @nents:       Number of SG entries
 * @buf:         Where to copy to
 * @buflen:      The number of bytes to copy
 * @skip:        Number of bytes to skip before copying
 *
 * Description:
 *   Like sg_copy_to_buffer() but skips the first @skip bytes of the
 *   sg list before reading.
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
              void *buf, size_t buflen, off_t skip)
{
    return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);