Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
0004  *
0005  * Scatterlist handling helpers.
0006  */
0007 #include <linux/export.h>
0008 #include <linux/slab.h>
0009 #include <linux/scatterlist.h>
0010 #include <linux/highmem.h>
0011 #include <linux/kmemleak.h>
0012 
0013 /**
0014  * sg_next - return the next scatterlist entry in a list
0015  * @sg:     The current sg entry
0016  *
0017  * Description:
0018  *   Usually the next entry will be @sg@ + 1, but if this sg element is part
0019  *   of a chained scatterlist, it could jump to the start of a new
0020  *   scatterlist array.
0021  *
0022  **/
0023 struct scatterlist *sg_next(struct scatterlist *sg)
0024 {
0025     if (sg_is_last(sg))
0026         return NULL;
0027 
0028     sg++;
0029     if (unlikely(sg_is_chain(sg)))
0030         sg = sg_chain_ptr(sg);
0031 
0032     return sg;
0033 }
0034 EXPORT_SYMBOL(sg_next);
0035 
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:     The scatterlist
 *
 * Description:
 * Walks the whole list, following chain links, and returns the number
 * of entries encountered.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
    int count = 0;

    while (sg) {
        count++;
        sg = sg_next(sg);
    }
    return count;
}
EXPORT_SYMBOL(sg_nents);
0053 
0054 /**
0055  * sg_nents_for_len - return total count of entries in scatterlist
0056  *                    needed to satisfy the supplied length
0057  * @sg:     The scatterlist
0058  * @len:    The total required length
0059  *
0060  * Description:
0061  * Determines the number of entries in sg that are required to meet
0062  * the supplied length, taking into account chaining as well
0063  *
0064  * Returns:
0065  *   the number of sg entries needed, negative error on failure
0066  *
0067  **/
0068 int sg_nents_for_len(struct scatterlist *sg, u64 len)
0069 {
0070     int nents;
0071     u64 total;
0072 
0073     if (!len)
0074         return 0;
0075 
0076     for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
0077         nents++;
0078         total += sg->length;
0079         if (total >= len)
0080             return nents;
0081     }
0082 
0083     return -EINVAL;
0084 }
0085 EXPORT_SYMBOL(sg_nents_for_len);
0086 
0087 /**
0088  * sg_last - return the last scatterlist entry in a list
0089  * @sgl:    First entry in the scatterlist
0090  * @nents:  Number of entries in the scatterlist
0091  *
0092  * Description:
0093  *   Should only be used casually, it (currently) scans the entire list
0094  *   to get the last entry.
0095  *
0096  *   Note that the @sgl@ pointer passed in need not be the first one,
0097  *   the important bit is that @nents@ denotes the number of entries that
0098  *   exist from @sgl@.
0099  *
0100  **/
0101 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
0102 {
0103     struct scatterlist *sg, *ret = NULL;
0104     unsigned int i;
0105 
0106     for_each_sg(sgl, sg, nents, i)
0107         ret = sg;
0108 
0109     BUG_ON(!sg_is_last(ret));
0110     return ret;
0111 }
0112 EXPORT_SYMBOL(sg_last);
0113 
0114 /**
0115  * sg_init_table - Initialize SG table
0116  * @sgl:       The SG table
0117  * @nents:     Number of entries in table
0118  *
0119  * Notes:
0120  *   If this is part of a chained sg table, sg_mark_end() should be
0121  *   used only on the last table part.
0122  *
0123  **/
0124 void sg_init_table(struct scatterlist *sgl, unsigned int nents)
0125 {
0126     memset(sgl, 0, sizeof(*sgl) * nents);
0127     sg_init_marker(sgl, nents);
0128 }
0129 EXPORT_SYMBOL(sg_init_table);
0130 
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:      SG entry
 * @buf:     Virtual address for IO
 * @buflen:  IO length
 *
 * Description:
 *   Sets up a one-entry table and points it at @buf/@buflen.
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
    sg_init_table(sg, 1);
    sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
0144 
0145 /*
0146  * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
0147  * helpers.
0148  */
0149 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
0150 {
0151     if (nents == SG_MAX_SINGLE_ALLOC) {
0152         /*
0153          * Kmemleak doesn't track page allocations as they are not
0154          * commonly used (in a raw form) for kernel data structures.
0155          * As we chain together a list of pages and then a normal
0156          * kmalloc (tracked by kmemleak), in order to for that last
0157          * allocation not to become decoupled (and thus a
0158          * false-positive) we need to inform kmemleak of all the
0159          * intermediate allocations.
0160          */
0161         void *ptr = (void *) __get_free_page(gfp_mask);
0162         kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
0163         return ptr;
0164     } else
0165         return kmalloc_array(nents, sizeof(struct scatterlist),
0166                      gfp_mask);
0167 }
0168 
0169 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
0170 {
0171     if (nents == SG_MAX_SINGLE_ALLOC) {
0172         kmemleak_free(sg);
0173         free_page((unsigned long) sg);
0174     } else
0175         kfree(sg);
0176 }
0177 
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:  The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *  scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:    Free function
 * @num_ents:   Number of entries in the table
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
             unsigned int nents_first_chunk, sg_free_fn *free_fn,
             unsigned int num_ents)
{
    struct scatterlist *sgl, *next;
    /* The first chunk may have a caller-specified size; later ones use max_ents. */
    unsigned curr_max_ents = nents_first_chunk ?: max_ents;

    if (unlikely(!table->sgl))
        return;

    sgl = table->sgl;
    while (num_ents) {
        unsigned int alloc_size = num_ents;
        unsigned int sg_size;

        /*
         * If we have more than max_ents segments left,
         * then assign 'next' to the sg table after the current one.
         * sg_size is then one less than alloc size, since the last
         * element is the chain pointer.
         */
        if (alloc_size > curr_max_ents) {
            /* Grab the follow-on chunk before this one is freed. */
            next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
            alloc_size = curr_max_ents;
            sg_size = alloc_size - 1;
        } else {
            sg_size = alloc_size;
            next = NULL;
        }

        num_ents -= sg_size;
        /* The preallocated first chunk belongs to the caller; skip free_fn. */
        if (nents_first_chunk)
            nents_first_chunk = 0;
        else
            free_fn(sgl, alloc_size);
        sgl = next;
        curr_max_ents = max_ents;
    }

    table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
0235 
0236 /**
0237  * sg_free_append_table - Free a previously allocated append sg table.
0238  * @table:   The mapped sg append table header
0239  *
0240  **/
0241 void sg_free_append_table(struct sg_append_table *table)
0242 {
0243     __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
0244             table->total_nents);
0245 }
0246 EXPORT_SYMBOL(sg_free_append_table);
0247 
0248 
0249 /**
0250  * sg_free_table - Free a previously allocated sg table
0251  * @table:  The mapped sg table header
0252  *
0253  **/
0254 void sg_free_table(struct sg_table *table)
0255 {
0256     __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
0257             table->orig_nents);
0258 }
0259 EXPORT_SYMBOL(sg_free_table);
0260 
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:  The sg table header to use
 * @nents:  Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *  scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:   GFP allocation mask
 * @alloc_fn:   Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
             unsigned int max_ents, struct scatterlist *first_chunk,
             unsigned int nents_first_chunk, gfp_t gfp_mask,
             sg_alloc_fn *alloc_fn)
{
    struct scatterlist *sg, *prv;
    unsigned int left;
    /* First chunk may be caller-sized; subsequent ones use max_ents. */
    unsigned curr_max_ents = nents_first_chunk ?: max_ents;
    /* Size of the previous chunk; only read once prv is non-NULL. */
    unsigned prv_max_ents;

    memset(table, 0, sizeof(*table));

    if (nents == 0)
        return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
    /* Chaining is unavailable on this arch, so one chunk must suffice. */
    if (WARN_ON_ONCE(nents > max_ents))
        return -EINVAL;
#endif

    left = nents;
    prv = NULL;
    do {
        unsigned int sg_size, alloc_size = left;

        /*
         * When another chunk will follow, the last slot of this chunk
         * is reserved for the chain pointer, so it holds one fewer
         * data entry than was allocated.
         */
        if (alloc_size > curr_max_ents) {
            alloc_size = curr_max_ents;
            sg_size = alloc_size - 1;
        } else
            sg_size = alloc_size;

        left -= sg_size;

        if (first_chunk) {
            /* Consume the caller-provided chunk exactly once. */
            sg = first_chunk;
            first_chunk = NULL;
        } else {
            sg = alloc_fn(alloc_size, gfp_mask);
        }
        if (unlikely(!sg)) {
            /*
             * Adjust entry count to reflect that the last
             * entry of the previous table won't be used for
             * linkage.  Without this, sg_kfree() may get
             * confused.
             */
            if (prv)
                table->nents = ++table->orig_nents;

            return -ENOMEM;
        }

        sg_init_table(sg, alloc_size);
        /* Count only data-bearing entries, not the chain slot. */
        table->nents = table->orig_nents += sg_size;

        /*
         * If this is the first mapping, assign the sg table header.
         * If this is not the first mapping, chain previous part.
         */
        if (prv)
            sg_chain(prv, prv_max_ents, sg);
        else
            table->sgl = sg;

        /*
         * If no more entries after this one, mark the end
         */
        if (!left)
            sg_mark_end(&sg[sg_size - 1]);

        prv = sg;
        prv_max_ents = curr_max_ents;
        curr_max_ents = max_ents;
    } while (left);

    return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
0359 
0360 /**
0361  * sg_alloc_table - Allocate and initialize an sg table
0362  * @table:  The sg table header to use
0363  * @nents:  Number of entries in sg list
0364  * @gfp_mask:   GFP allocation mask
0365  *
0366  *  Description:
0367  *    Allocate and initialize an sg table. If @nents@ is larger than
0368  *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
0369  *
0370  **/
0371 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
0372 {
0373     int ret;
0374 
0375     ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
0376                    NULL, 0, gfp_mask, sg_kmalloc);
0377     if (unlikely(ret))
0378         sg_free_table(table);
0379     return ret;
0380 }
0381 EXPORT_SYMBOL(sg_alloc_table);
0382 
/*
 * Return the next usable sg entry of an append table, allocating and
 * chaining a fresh scatterlist chunk when the current one is exhausted.
 * @needed_sges is an upper bound used to size a new chunk.  Returns
 * ERR_PTR(-ENOMEM) if a new chunk is needed but cannot be allocated.
 */
static struct scatterlist *get_next_sg(struct sg_append_table *table,
                       struct scatterlist *cur,
                       unsigned long needed_sges,
                       gfp_t gfp_mask)
{
    struct scatterlist *new_sg, *next_sg;
    unsigned int alloc_size;

    if (cur) {
        next_sg = sg_next(cur);
        /* Check if the last entry should be kept for chaining */
        if (!sg_is_last(next_sg) || needed_sges == 1)
            return next_sg;
    }

    alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
    new_sg = sg_kmalloc(alloc_size, gfp_mask);
    if (!new_sg)
        return ERR_PTR(-ENOMEM);
    sg_init_table(new_sg, alloc_size);
    if (cur) {
        /* One entry of the new chunk is consumed by the chain link. */
        table->total_nents += alloc_size - 1;
        __sg_chain(next_sg, new_sg);
    } else {
        /* First chunk: it becomes the head of the table. */
        table->sgt.sgl = new_sg;
        table->total_nents = alloc_size;
    }
    return new_sg;
}
0412 
/**
 * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
 *                                    table from an array of pages
 * @sgt_append:  The sg append table to use
 * @pages:       Pointer to an array of page pointers
 * @n_pages:     Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @left_pages:  Left pages caller have to set after this call
 * @gfp_mask:    GFP allocation mask
 *
 * Description:
 *    In the first call it allocates and initializes an sg table from a list of
 *    pages, else reuse the scatterlist from sgt_append. Contiguous ranges of
 *    the pages are squashed into a single scatterlist entry up to the maximum
 *    size specified in @max_segment.  A user may provide an offset at a start
 *    and a size of valid data in a buffer specified by the page array. The
 *    returned sg table is released by sg_free_append_table
 *
 * Returns:
 *   0 on success, negative error on failure
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   sg_free_append_table() to cleanup any leftover allocations.
 *
 *   In the first call, sgt_append must be initialized.
 */
int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
        struct page **pages, unsigned int n_pages, unsigned int offset,
        unsigned long size, unsigned int max_segment,
        unsigned int left_pages, gfp_t gfp_mask)
{
    unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
    unsigned int added_nents = 0;
    struct scatterlist *s = sgt_append->prv;

    /*
     * The algorithm below requires max_segment to be aligned to PAGE_SIZE
     * otherwise it can overshoot.
     */
    max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
    if (WARN_ON(max_segment < PAGE_SIZE))
        return -EINVAL;

    /* Appending needs chaining, which this arch does not support. */
    if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
        return -EOPNOTSUPP;

    if (sgt_append->prv) {
        /* PFN of the first page just past the previous entry's data. */
        unsigned long paddr =
            (page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE +
             sgt_append->prv->offset + sgt_append->prv->length) /
            PAGE_SIZE;

        /* An offset only makes sense for the very first append. */
        if (WARN_ON(offset))
            return -EINVAL;

        /* Merge contiguous pages into the last SG */
        prv_len = sgt_append->prv->length;
        while (n_pages && page_to_pfn(pages[0]) == paddr) {
            if (sgt_append->prv->length + PAGE_SIZE > max_segment)
                break;
            sgt_append->prv->length += PAGE_SIZE;
            paddr++;
            pages++;
            n_pages--;
        }
        if (!n_pages)
            goto out;
    }

    /* compute number of contiguous chunks */
    chunks = 1;
    seg_len = 0;
    for (i = 1; i < n_pages; i++) {
        seg_len += PAGE_SIZE;
        /* A chunk ends at max_segment or a physical discontinuity. */
        if (seg_len >= max_segment ||
            page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
            chunks++;
            seg_len = 0;
        }
    }

    /* merging chunks and putting them into the scatterlist */
    cur_page = 0;
    for (i = 0; i < chunks; i++) {
        unsigned int j, chunk_size;

        /* look for the end of the current chunk */
        seg_len = 0;
        for (j = cur_page + 1; j < n_pages; j++) {
            seg_len += PAGE_SIZE;
            if (seg_len >= max_segment ||
                page_to_pfn(pages[j]) !=
                page_to_pfn(pages[j - 1]) + 1)
                break;
        }

        /* Pass how many chunks might be left */
        s = get_next_sg(sgt_append, s, chunks - i + left_pages,
                gfp_mask);
        if (IS_ERR(s)) {
            /*
             * Adjust entry length to be as before function was
             * called.
             */
            if (sgt_append->prv)
                sgt_append->prv->length = prv_len;
            return PTR_ERR(s);
        }
        /* The last chunk may be cut short by @size. */
        chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
        sg_set_page(s, pages[cur_page],
                min_t(unsigned long, size, chunk_size), offset);
        added_nents++;
        size -= chunk_size;
        offset = 0;
        cur_page = j;
    }
    sgt_append->sgt.nents += added_nents;
    sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
    sgt_append->prv = s;
out:
    /* No further appends expected: terminate the list. */
    if (!left_pages)
        sg_mark_end(s);
    return 0;
}
EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
0541 
0542 /**
0543  * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
0544  *                                     an array of pages and given maximum
0545  *                                     segment.
0546  * @sgt:     The sg table header to use
0547  * @pages:   Pointer to an array of page pointers
0548  * @n_pages:     Number of pages in the pages array
0549  * @offset:      Offset from start of the first page to the start of a buffer
0550  * @size:        Number of valid bytes in the buffer (after offset)
0551  * @max_segment: Maximum size of a scatterlist element in bytes
0552  * @gfp_mask:    GFP allocation mask
0553  *
0554  *  Description:
0555  *    Allocate and initialize an sg table from a list of pages. Contiguous
0556  *    ranges of the pages are squashed into a single scatterlist node up to the
0557  *    maximum size specified in @max_segment. A user may provide an offset at a
0558  *    start and a size of valid data in a buffer specified by the page array.
0559  *
0560  *    The returned sg table is released by sg_free_table.
0561  *
0562  *  Returns:
0563  *   0 on success, negative error on failure
0564  */
0565 int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
0566                 unsigned int n_pages, unsigned int offset,
0567                 unsigned long size, unsigned int max_segment,
0568                 gfp_t gfp_mask)
0569 {
0570     struct sg_append_table append = {};
0571     int err;
0572 
0573     err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
0574                            size, max_segment, 0, gfp_mask);
0575     if (err) {
0576         sg_free_append_table(&append);
0577         return err;
0578     }
0579     memcpy(sgt, &append.sgt, sizeof(*sgt));
0580     WARN_ON(append.total_nents != sgt->orig_nents);
0581     return 0;
0582 }
0583 EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
0584 
0585 #ifdef CONFIG_SGL_ALLOC
0586 
0587 /**
0588  * sgl_alloc_order - allocate a scatterlist and its pages
0589  * @length: Length in bytes of the scatterlist. Must be at least one
0590  * @order: Second argument for alloc_pages()
0591  * @chainable: Whether or not to allocate an extra element in the scatterlist
0592  *  for scatterlist chaining purposes
0593  * @gfp: Memory allocation flags
0594  * @nent_p: [out] Number of entries in the scatterlist that have pages
0595  *
0596  * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
0597  */
0598 struct scatterlist *sgl_alloc_order(unsigned long long length,
0599                     unsigned int order, bool chainable,
0600                     gfp_t gfp, unsigned int *nent_p)
0601 {
0602     struct scatterlist *sgl, *sg;
0603     struct page *page;
0604     unsigned int nent, nalloc;
0605     u32 elem_len;
0606 
0607     nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
0608     /* Check for integer overflow */
0609     if (length > (nent << (PAGE_SHIFT + order)))
0610         return NULL;
0611     nalloc = nent;
0612     if (chainable) {
0613         /* Check for integer overflow */
0614         if (nalloc + 1 < nalloc)
0615             return NULL;
0616         nalloc++;
0617     }
0618     sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
0619                 gfp & ~GFP_DMA);
0620     if (!sgl)
0621         return NULL;
0622 
0623     sg_init_table(sgl, nalloc);
0624     sg = sgl;
0625     while (length) {
0626         elem_len = min_t(u64, length, PAGE_SIZE << order);
0627         page = alloc_pages(gfp, order);
0628         if (!page) {
0629             sgl_free_order(sgl, order);
0630             return NULL;
0631         }
0632 
0633         sg_set_page(sg, page, elem_len, 0);
0634         length -= elem_len;
0635         sg = sg_next(sg);
0636     }
0637     WARN_ONCE(length, "length = %lld\n", length);
0638     if (nent_p)
0639         *nent_p = nent;
0640     return sgl;
0641 }
0642 EXPORT_SYMBOL(sgl_alloc_order);
0643 
0644 /**
0645  * sgl_alloc - allocate a scatterlist and its pages
0646  * @length: Length in bytes of the scatterlist
0647  * @gfp: Memory allocation flags
0648  * @nent_p: [out] Number of entries in the scatterlist
0649  *
0650  * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
0651  */
0652 struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
0653                   unsigned int *nent_p)
0654 {
0655     return sgl_alloc_order(length, 0, false, gfp, nent_p);
0656 }
0657 EXPORT_SYMBOL(sgl_alloc);
0658 
0659 /**
0660  * sgl_free_n_order - free a scatterlist and its pages
0661  * @sgl: Scatterlist with one or more elements
0662  * @nents: Maximum number of elements to free
0663  * @order: Second argument for __free_pages()
0664  *
0665  * Notes:
0666  * - If several scatterlists have been chained and each chain element is
0667  *   freed separately then it's essential to set nents correctly to avoid that a
0668  *   page would get freed twice.
0669  * - All pages in a chained scatterlist can be freed at once by setting @nents
0670  *   to a high number.
0671  */
0672 void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
0673 {
0674     struct scatterlist *sg;
0675     struct page *page;
0676     int i;
0677 
0678     for_each_sg(sgl, sg, nents, i) {
0679         if (!sg)
0680             break;
0681         page = sg_page(sg);
0682         if (page)
0683             __free_pages(page, order);
0684     }
0685     kfree(sgl);
0686 }
0687 EXPORT_SYMBOL(sgl_free_n_order);
0688 
0689 /**
0690  * sgl_free_order - free a scatterlist and its pages
0691  * @sgl: Scatterlist with one or more elements
0692  * @order: Second argument for __free_pages()
0693  */
0694 void sgl_free_order(struct scatterlist *sgl, int order)
0695 {
0696     sgl_free_n_order(sgl, INT_MAX, order);
0697 }
0698 EXPORT_SYMBOL(sgl_free_order);
0699 
/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 *
 * Description:
 *   Equivalent to sgl_free_order(@sgl, 0) for order-0 page allocations.
 */
void sgl_free(struct scatterlist *sgl)
{
    sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);
0709 
0710 #endif /* CONFIG_SGL_ALLOC */
0711 
0712 void __sg_page_iter_start(struct sg_page_iter *piter,
0713               struct scatterlist *sglist, unsigned int nents,
0714               unsigned long pgoffset)
0715 {
0716     piter->__pg_advance = 0;
0717     piter->__nents = nents;
0718 
0719     piter->sg = sglist;
0720     piter->sg_pgoffset = pgoffset;
0721 }
0722 EXPORT_SYMBOL(__sg_page_iter_start);
0723 
0724 static int sg_page_count(struct scatterlist *sg)
0725 {
0726     return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
0727 }
0728 
/*
 * Advance the iterator by one page, stepping into subsequent sg entries
 * as needed.  Returns false once the list (or the nents budget) is
 * exhausted.
 */
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
    if (!piter->__nents || !piter->sg)
        return false;

    /* __pg_advance is 0 on the first call, 1 afterwards. */
    piter->sg_pgoffset += piter->__pg_advance;
    piter->__pg_advance = 1;

    /* Carry the page offset over into following entries if necessary. */
    while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
        piter->sg_pgoffset -= sg_page_count(piter->sg);
        piter->sg = sg_next(piter->sg);
        if (!--piter->__nents || !piter->sg)
            return false;
    }

    return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
0747 
0748 static int sg_dma_page_count(struct scatterlist *sg)
0749 {
0750     return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
0751 }
0752 
/*
 * DMA-side twin of __sg_page_iter_next(): advances over dma_len pages
 * rather than CPU-side lengths.  Returns false when iteration ends.
 */
bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
    struct sg_page_iter *piter = &dma_iter->base;

    if (!piter->__nents || !piter->sg)
        return false;

    /* __pg_advance is 0 on the first call, 1 afterwards. */
    piter->sg_pgoffset += piter->__pg_advance;
    piter->__pg_advance = 1;

    /* Carry the page offset over into following entries if necessary. */
    while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
        piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
        piter->sg = sg_next(piter->sg);
        if (!--piter->__nents || !piter->sg)
            return false;
    }

    return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);
0773 
0774 /**
0775  * sg_miter_start - start mapping iteration over a sg list
0776  * @miter: sg mapping iter to be started
0777  * @sgl: sg list to iterate over
0778  * @nents: number of sg entries
0779  *
0780  * Description:
0781  *   Starts mapping iterator @miter.
0782  *
0783  * Context:
0784  *   Don't care.
0785  */
0786 void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
0787             unsigned int nents, unsigned int flags)
0788 {
0789     memset(miter, 0, sizeof(struct sg_mapping_iter));
0790 
0791     __sg_page_iter_start(&miter->piter, sgl, nents, 0);
0792     WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
0793     miter->__flags = flags;
0794 }
0795 EXPORT_SYMBOL(sg_miter_start);
0796 
/*
 * Position @miter on the next page with data, computing the in-page
 * offset and the number of usable bytes.  Returns false when the sg
 * list is exhausted.  No-op when the current page still has bytes left.
 */
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
    if (!miter->__remaining) {
        struct scatterlist *sg;

        if (!__sg_page_iter_next(&miter->piter))
            return false;

        sg = miter->piter.sg;

        /* sg->offset only applies to the entry's first page. */
        miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
        /* An offset >= PAGE_SIZE spills into following pages. */
        miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
        miter->__offset &= PAGE_SIZE - 1;
        /* Bytes of this entry's data from the current position ... */
        miter->__remaining = sg->offset + sg->length -
                     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
                     miter->__offset;
        /* ... clamped to the end of the current page. */
        miter->__remaining = min_t(unsigned long, miter->__remaining,
                       PAGE_SIZE - miter->__offset);
    }

    return true;
}
0819 
0820 /**
0821  * sg_miter_skip - reposition mapping iterator
0822  * @miter: sg mapping iter to be skipped
0823  * @offset: number of bytes to plus the current location
0824  *
0825  * Description:
0826  *   Sets the offset of @miter to its current location plus @offset bytes.
0827  *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
0828  *   stops @miter.
0829  *
0830  * Context:
0831  *   Don't care.
0832  *
0833  * Returns:
0834  *   true if @miter contains the valid mapping.  false if end of sg
0835  *   list is reached.
0836  */
0837 bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
0838 {
0839     sg_miter_stop(miter);
0840 
0841     while (offset) {
0842         off_t consumed;
0843 
0844         if (!sg_miter_get_next_page(miter))
0845             return false;
0846 
0847         consumed = min_t(off_t, offset, miter->__remaining);
0848         miter->__offset += consumed;
0849         miter->__remaining -= consumed;
0850         offset -= consumed;
0851     }
0852 
0853     return true;
0854 }
0855 EXPORT_SYMBOL(sg_miter_skip);
0856 
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
    sg_miter_stop(miter);

    /*
     * Get to the next page if necessary.
     * __remaining, __offset is adjusted by sg_miter_stop
     */
    if (!sg_miter_get_next_page(miter))
        return false;

    miter->page = sg_page_iter_page(&miter->piter);
    /* Default consumed to the full mapping; callers may shrink it. */
    miter->consumed = miter->length = miter->__remaining;

    /* Atomic mappings must be released before sleeping; see sg_miter_stop(). */
    if (miter->__flags & SG_MITER_ATOMIC)
        miter->addr = kmap_atomic(miter->page) + miter->__offset;
    else
        miter->addr = kmap(miter->page) + miter->__offset;

    return true;
}
EXPORT_SYMBOL(sg_miter_next);
0895 
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
    WARN_ON(miter->consumed > miter->length);

    /* drop resources from the last iteration */
    if (miter->addr) {
        /* Advance past what the caller reported as consumed. */
        miter->__offset += miter->consumed;
        miter->__remaining -= miter->consumed;

        /* Data was written to the page: keep caches coherent. */
        if (miter->__flags & SG_MITER_TO_SG)
            flush_dcache_page(miter->page);

        if (miter->__flags & SG_MITER_ATOMIC) {
            WARN_ON_ONCE(!pagefault_disabled());
            kunmap_atomic(miter->addr);
        } else
            kunmap(miter->page);

        miter->page = NULL;
        miter->addr = NULL;
        miter->length = 0;
        miter->consumed = 0;
    }
}
EXPORT_SYMBOL(sg_miter_stop);
0934 
0935 /**
0936  * sg_copy_buffer - Copy data between a linear buffer and an SG list
0937  * @sgl:         The SG list
0938  * @nents:       Number of SG entries
0939  * @buf:         Where to copy from
0940  * @buflen:      The number of bytes to copy
0941  * @skip:        Number of bytes to skip before copying
0942  * @to_buffer:       transfer direction (true == from an sg list to a
0943  *           buffer, false == from a buffer to an sg list)
0944  *
0945  * Returns the number of copied bytes.
0946  *
0947  **/
0948 size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
0949               size_t buflen, off_t skip, bool to_buffer)
0950 {
0951     unsigned int offset = 0;
0952     struct sg_mapping_iter miter;
0953     unsigned int sg_flags = SG_MITER_ATOMIC;
0954 
0955     if (to_buffer)
0956         sg_flags |= SG_MITER_FROM_SG;
0957     else
0958         sg_flags |= SG_MITER_TO_SG;
0959 
0960     sg_miter_start(&miter, sgl, nents, sg_flags);
0961 
0962     if (!sg_miter_skip(&miter, skip))
0963         return 0;
0964 
0965     while ((offset < buflen) && sg_miter_next(&miter)) {
0966         unsigned int len;
0967 
0968         len = min(miter.length, buflen - offset);
0969 
0970         if (to_buffer)
0971             memcpy(buf + offset, miter.addr, len);
0972         else
0973             memcpy(miter.addr, buf + offset, len);
0974 
0975         offset += len;
0976     }
0977 
0978     sg_miter_stop(&miter);
0979 
0980     return offset;
0981 }
0982 EXPORT_SYMBOL(sg_copy_buffer);
0983 
0984 /**
0985  * sg_copy_from_buffer - Copy from a linear buffer to an SG list
0986  * @sgl:         The SG list
0987  * @nents:       Number of SG entries
0988  * @buf:         Where to copy from
0989  * @buflen:      The number of bytes to copy
0990  *
0991  * Returns the number of copied bytes.
0992  *
0993  **/
0994 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
0995                const void *buf, size_t buflen)
0996 {
0997     return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
0998 }
0999 EXPORT_SYMBOL(sg_copy_from_buffer);
1000 
1001 /**
1002  * sg_copy_to_buffer - Copy from an SG list to a linear buffer
1003  * @sgl:         The SG list
1004  * @nents:       Number of SG entries
1005  * @buf:         Where to copy to
1006  * @buflen:      The number of bytes to copy
1007  *
1008  * Returns the number of copied bytes.
1009  *
1010  **/
1011 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
1012              void *buf, size_t buflen)
1013 {
1014     return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
1015 }
1016 EXPORT_SYMBOL(sg_copy_to_buffer);
1017 
1018 /**
1019  * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
1020  * @sgl:         The SG list
1021  * @nents:       Number of SG entries
1022  * @buf:         Where to copy from
1023  * @buflen:      The number of bytes to copy
1024  * @skip:        Number of bytes to skip before copying
1025  *
1026  * Returns the number of copied bytes.
1027  *
1028  **/
1029 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1030                 const void *buf, size_t buflen, off_t skip)
1031 {
1032     return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
1033 }
1034 EXPORT_SYMBOL(sg_pcopy_from_buffer);
1035 
1036 /**
1037  * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
1038  * @sgl:         The SG list
1039  * @nents:       Number of SG entries
1040  * @buf:         Where to copy to
1041  * @buflen:      The number of bytes to copy
1042  * @skip:        Number of bytes to skip before copying
1043  *
1044  * Returns the number of copied bytes.
1045  *
1046  **/
1047 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
1048               void *buf, size_t buflen, off_t skip)
1049 {
1050     return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
1051 }
1052 EXPORT_SYMBOL(sg_pcopy_to_buffer);
1053 
1054 /**
1055  * sg_zero_buffer - Zero-out a part of a SG list
1056  * @sgl:         The SG list
1057  * @nents:       Number of SG entries
1058  * @buflen:      The number of bytes to zero out
1059  * @skip:        Number of bytes to skip before zeroing
1060  *
1061  * Returns the number of bytes zeroed.
1062  **/
1063 size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
1064                size_t buflen, off_t skip)
1065 {
1066     unsigned int offset = 0;
1067     struct sg_mapping_iter miter;
1068     unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
1069 
1070     sg_miter_start(&miter, sgl, nents, sg_flags);
1071 
1072     if (!sg_miter_skip(&miter, skip))
1073         return false;
1074 
1075     while (offset < buflen && sg_miter_next(&miter)) {
1076         unsigned int len;
1077 
1078         len = min(miter.length, buflen - offset);
1079         memset(miter.addr, 0, len);
1080 
1081         offset += len;
1082     }
1083 
1084     sg_miter_stop(&miter);
1085     return offset;
1086 }
1087 EXPORT_SYMBOL(sg_zero_buffer);